67 Commits

Author SHA1 Message Date
yu
e672bd426c update 2020-03-06 17:10:28 +08:00
joy.zhou
55f1f1aa80 Update deploy.yaml 2020-01-19 11:19:21 +08:00
joy.zhou
ccb7c37b96 Update svc.yaml 2020-01-19 11:18:44 +08:00
joy.zhou
7e29cc7213 Update svc.yaml 2020-01-19 11:18:38 +08:00
winglq
1971b5c324 update retained message even if it's already there (#70)
Signed-off-by: Liu Qing <winglq@gmail.com>
2020-01-06 11:22:59 +08:00
foosinn
fb453e8c0f fix ipv6 addresses (#68) 2019-12-30 13:42:31 +08:00
joy.zhou
eef900ad2f Update comm.go 2019-12-25 17:14:44 +08:00
joy.zhou
d24e0dac13 Update info.go 2019-12-25 17:14:11 +08:00
joy.zhou
fd0622710b Update client.go 2019-12-25 17:13:44 +08:00
joy.zhou
73dd5bb376 Update config.go 2019-12-25 17:13:16 +08:00
joy.zhou
474c557c7a Update sesson.go 2019-12-25 17:12:59 +08:00
joy.zhou
f3e7e5481a Update auth.go 2019-12-25 17:12:30 +08:00
joy.zhou
57fce9c7dc Update broker.go 2019-12-25 17:12:07 +08:00
joy.zhou
995898c5f4 Update main.go 2019-12-25 17:10:32 +08:00
joy.zhou
2404693bd2 fix issue #66 2019-12-12 15:07:12 +08:00
joy.zhou
68cd5e94a4 Merge branch 'master' of https://github.com/fhmq/hmq 2019-11-14 11:09:52 +08:00
joy.zhou
44fa819f62 update some logic 2019-11-14 11:09:15 +08:00
joy.zhou
2b7bb3fcd5 Update README.md 2019-11-11 21:08:21 +08:00
joy.zhou
4c107c67ab fix bug (#63)
* update

* update auth file

* fixbug
2019-11-11 11:41:38 +08:00
joy.zhou
896769fd9d Add acl (#61)
* update

* update auth file
2019-10-30 14:44:18 +08:00
joy.zhou
c7a51fe68f fixed 2019-09-30 11:06:05 +08:00
joy.zhou
a3fc611615 fix issue 2019-09-30 11:04:46 +08:00
H.K
e74b9facd1 fix: (#57)
topics used but not make
2019-09-30 10:50:40 +08:00
joy.zhou
53a79caad9 update deploy 2019-09-18 14:17:19 +08:00
joy.zhou
55576c1eb3 update kafka plugins 2019-09-18 14:00:19 +08:00
joy.zhou
80b64b147e delete acl file 2019-08-23 16:40:39 +08:00
joy.zhou
ea055d5929 update authhttp 2019-08-23 16:22:59 +08:00
joy.zhou
8d8707801f REMOVE NO USE 2019-08-20 10:27:15 +08:00
joy.zhou
fd2974a546 update Readme 2019-08-19 10:57:29 +08:00
joy.zhou
72211efedf Merge branches 'plugin_update' and 'master' of https://github.com/fhmq/hmq 2019-08-19 10:48:55 +08:00
joy.zhou
7e15da209e Plugin update (#48)
* replace plugin

* update plugin
2019-08-19 10:35:17 +08:00
joy.zhou
69a26f8cd9 update plugin 2019-08-19 10:33:19 +08:00
joy.zhou
148738800b replace plugin 2019-08-16 18:18:19 +08:00
joy.zhou
e4e736d1e2 update readme.md 2019-08-02 10:10:27 +08:00
joy.zhou
4c5a48a44b Plugins update log (#47)
* modify

* update

* add acl

* add feature

* update dockerfile

* add deploy

* update

* update

* plugins

* plugins

* update

* update

* update

* fixed

* remove

* fixed

* add log

* update

* fixed

* update

* fix config

* add http api

* add http api

* resp

* add config for work chan

* update

* fixed

* update

* disable trace

* fixed

* change acl

* fixed

* fixed res

* dd

* dd

* ddd

* dd

* update

* fixed

* update

* add

* fixed

* update key

* add log

* update

* format

* update

* update auth

* update

* update readme

* added

* update

* fixed

* fixed

* fix

* upade

* update

* update

* update
2019-07-25 16:01:40 +08:00
joy.zhou
c6b1f1db42 Plugins support (#46)
* modify

* update

* add acl

* add feature

* update dockerfile

* add deploy

* update

* update

* plugins

* plugins

* update

* update

* update

* fixed

* remove

* fixed

* add log

* update

* fixed

* update

* fix config

* add http api

* add http api

* resp

* add config for work chan

* update

* fixed

* update

* disable trace

* fixed

* change acl

* fixed

* fixed res

* dd

* dd

* ddd

* dd

* update

* fixed

* update

* add

* fixed

* update key

* add log

* update

* format

* update

* update auth

* update

* update readme

* added

* update

* fixed

* fixed

* fix

* upade

* update

* update
2019-07-25 13:54:42 +08:00
Yuyan Zhou
daf4a0e0f5 add vendor 2019-04-24 15:45:34 +08:00
joy.zhou
c350d16ca1 add fix pool for message order (#42)
* fix pool for message order

* add go modules
2019-04-24 14:54:21 +08:00
Yuyan Zhou
edc46c1ee6 remove publish message check 2019-04-22 10:21:14 +08:00
joyz
6193be74fa Merge branch 'master' of https://github.com/fhmq/hmq 2019-01-22 22:11:59 +08:00
joyz
90beada459 some modify 2019-01-22 22:11:54 +08:00
Marc Magnin
6c7fe6a0f7 simple fix (#35) 2019-01-07 19:56:00 +08:00
joyz
2b56664d85 remove no use 2018-12-27 21:22:32 +08:00
joy.zhou
7547ad3bdc Restruct (#34)
* modify

* remove

* modify

* modify

* remove no use

* add online/offline notification

* modify

* format log

* add reference
2018-12-26 14:51:13 +08:00
joy.zhou
84e7fe2490 context (#28) 2018-05-10 13:13:36 +08:00
zhouyuyan
684584b208 fix write logic 2018-04-28 09:37:37 +08:00
zhouyuyan
56fb4a2d54 fix issue 25 2018-04-28 09:08:28 +08:00
joy.zhou
5ed4728575 Wpool (#23)
* pool

* pool

* wpool
2018-04-04 13:49:52 +08:00
zhouyuyan
c0fea6a5ba modify_message_pool 2018-02-24 13:19:43 +08:00
zhouyuyan
47500910e1 fix broker out painc 2018-02-06 11:01:06 +08:00
joy.zhou
0ff20b6ee2 Update README.md 2018-02-03 13:11:53 +08:00
joy.zhou
7155667f6c Pool (#16)
* add pool

* elastic workerpool

* del buf

* modify usage

* modify readme
2018-02-03 12:42:25 +08:00
zhouyuyan
83db82cdcc Merge branch 'master' of https://github.com/fhmq/hmq 2018-01-31 11:00:29 +08:00
zhouyuyan
b3653bcfb1 fix #14 2018-01-31 10:59:59 +08:00
joy.zhou
221d00480e update read.me 2018-01-26 16:29:14 +08:00
zhouyuyan
91733bf91e modify debug log 2018-01-26 15:47:34 +08:00
Marc Magnin
ef252550dc fhmq/hmq#5 added zap logger (#11) 2018-01-26 13:51:36 +08:00
joy.zhou
1058256235 update readme 2018-01-25 19:34:37 +08:00
joy.zhou
5a569f14a3 del debug info
delete debug message body
2018-01-25 19:31:47 +08:00
zhouyuyan
93b21777ff add lisence 2018-01-25 13:47:50 +08:00
zhouyuyan
dcf2934e1b add flag for hmq 2018-01-25 13:11:45 +08:00
joy.zhou
d9e6e216b0 Merge pull request #4 from MarcMagnin/master
fhmq/hmq#2 added full package ref
2018-01-24 18:14:13 +08:00
Marc Magnin
ca3951769a fhmq/hmq#2 added full package ref 2018-01-23 15:29:16 +01:00
zhouyuyan
0439e7ce90 fxi ws conn 2018-01-22 09:30:08 +08:00
zhouyuyan
dc0f2185ab skip self 2018-01-19 13:53:47 +08:00
zhouyuyan
7462afcfb5 modify readme 2018-01-19 13:49:53 +08:00
zhouyuyan
114e6f901e modify cluster 2018-01-19 13:41:17 +08:00
51 changed files with 3414 additions and 1297 deletions

1
.gitignore vendored
View File

@@ -1,3 +1,4 @@
hmq
log
log/*
*.test

8
.vscode/settings.json vendored Normal file
View File

@@ -0,0 +1,8 @@
{
"go.lintFlags": [
"--disable=all",
"--enable=errcheck,varcheck,deadcode",
"--enable=varcheck",
"--enable=deadcode"
]
}

View File

@@ -1,11 +1,12 @@
FROM alpine
COPY hmq /
COPY ssl /ssl
COPY conf /conf
FROM golang:1.12 as builder
WORKDIR /go/src/github.com/fhmq/hmq
COPY . .
RUN CGO_ENABLED=0 go build -o hmq -a -ldflags '-extldflags "-static"' .
FROM alpine:3.8
WORKDIR /
COPY --from=builder /go/src/github.com/fhmq/hmq/hmq .
EXPOSE 1883
EXPOSE 1888
EXPOSE 8883
EXPOSE 1993
CMD ["/hmq"]

201
LICENSE Normal file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

143
README.md
View File

@@ -3,18 +3,40 @@ Free and High Performance MQTT Broker
## About
Golang MQTT Broker, Version 3.1.1, and Compatible
for [eclipse paho client](https://github.com/eclipse?utf8=%E2%9C%93&q=mqtt&type=&language=)
Download: [click here](https://github.com/fhmq/hmq/releases)
for [eclipse paho client](https://github.com/eclipse?utf8=%E2%9C%93&q=mqtt&type=&language=) and mosquitto-client
## RUNNING
```bash
$ git clone https://github.com/fhmq/hmq.git
$ cd hmq
$ go get github.com/fhmq/hmq
$ cd $GOPATH/github.com/fhmq/hmq
$ go run main.go
```
### broker.config
## Usage of hmq:
~~~
Usage: hmq [options]
Broker Options:
-w, --worker <number>  Worker num to process message, prefer (client num)/10. (default 1024)
-p, --port <port> Use port for clients (default: 1883)
--host <host> Network host to listen on. (default "0.0.0.0")
-ws, --wsport <port> Use port for websocket monitoring
-wsp,--wspath <path> Use path for websocket monitoring
-c, --config <file> Configuration file
Logging Options:
-d, --debug <bool> Enable debugging output (default false)
-D Debug enabled
Cluster Options:
-r, --router  <rurl>  Router that maintains cluster info
-cp, --clusterport <cluster-port> Cluster listen port for others
Common Options:
-h, --help Show this message
~~~
### hmq.config
~~~
{
"workerNum": 4096,
@@ -22,9 +44,9 @@ $ go run main.go
"host": "0.0.0.0",
"cluster": {
"host": "0.0.0.0",
"port": "1993",
"routers": ["10.10.0.11:1993","10.10.0.12:1993"]
"port": "1993"
},
"router": "127.0.0.1:9888",
"wsPort": "1888",
"wsPath": "/ws",
"wsTLS": true,
@@ -36,8 +58,10 @@ $ go run main.go
"certFile": "tls/server/cert.pem",
"keyFile": "tls/server/key.pem"
},
"acl":true,
"aclConf":"conf/acl.conf"
"plugins": {
"auth": "authhttp",
"bridge": "kafka"
}
}
~~~
@@ -53,72 +77,47 @@ $ go run main.go
* Supports will messages
* Queue subscribe
* Websocket Support
* TLS/SSL Support
* Flexible ACL
* Auth Support
* Auth Connect
* Auth ACL
* Cache Support
### QUEUE SUBSCRIBE
* Kafka Bridge Support
* Action Deliver
* Regexp Deliver
* HTTP API
* Disconnect Connect (future more)
### Share SUBSCRIBE
~~~
| Prefix | Examples |
| ------------- |---------------------------------|
| $queue/ | mosquitto_sub -t $queue/topic |
| Prefix | Examples | Publish |
| ------------------- |-------------------------------------------|--------------------------- --|
| $share/<group>/topic | mosquitto_sub -t $share/<group>/topic | mosquitto_pub -t topic |
~~~
### ACL Configure
#### The ACL rules define:
~~~
Allow | type | value | pubsub | Topics
~~~
#### ACL Config
~~~
## type clientid , username, ipaddr
##pub 1 , sub 2, pubsub 3
## %c is clientid , %u is username
allow ip 127.0.0.1 2 $SYS/#
allow clientid 0001 3 #
allow username admin 3 #
allow username joy 3 /test,hello/world
allow clientid * 1 toCloud/%c
allow username * 1 toCloud/%u
deny clientid * 3 #
~~~
### Cluster
```bash
1, start router for hmq (https://github.com/fhmq/router.git)
$ go get github.com/fhmq/router
$ cd $GOPATH/github.com/fhmq/router
$ go run main.go
2, config router in hmq.config ("router": "127.0.0.1:9888")
```
Other Version Of Cluster Based On gRPC: [click here](https://github.com/fhmq/rhmq)
~~~
#allow local sub $SYS topic
allow ip 127.0.0.1 2 $SYS/#
~~~
~~~
#allow client who's id with 0001 or username with admin pub sub all topic
allow clientid 0001 3 #
allow username admin 3 #
~~~
~~~
#allow client with the username joy can pub sub topic '/test' and 'hello/world'
allow username joy 3 /test,hello/world
~~~
~~~
#allow all client pub the topic toCloud/{clientid/username}
allow clientid * 1 toCloud/%c
allow username * 1 toCloud/%u
~~~
~~~
#deny all client pub sub all topic
deny clientid * 3 #
~~~
Client match acl rule one by one
~~~
--------- --------- ---------
Client -> | Rule1 | --nomatch--> | Rule2 | --nomatch--> | Rule3 | -->
--------- --------- ---------
| | |
match match match
\|/ \|/ \|/
allow | deny allow | deny allow | deny
~~~
### Online/Offline Notification
```bash
topic:
$SYS/broker/connection/clients/<clientID>
payload:
{"clientID":"client001","online":true/false,"timestamp":"2018-10-25T09:32:32Z"}
```
## Performance
@@ -132,3 +131,13 @@ Client -> | Rule1 | --nomatch--> | Rule2 | --nomatch--> | Rule3 | -->
## License
* Apache License Version 2.0
## Reference
* Surgermq.(https://github.com/surgemq/surgemq)
## Benchmark Tool
* https://github.com/inovex/mqtt-stresser
* https://github.com/krylovsk/mqtt-benchmark

View File

@@ -1,80 +1,40 @@
package broker
import (
"hmq/lib/acl"
"strings"
log "github.com/cihub/seelog"
"github.com/fsnotify/fsnotify"
)
const (
PUB = 1
SUB = 2
SUB = "1"
PUB = "2"
)
func (c *client) CheckTopicAuth(typ int, topic string) bool {
if c.typ != CLIENT || !c.broker.config.Acl {
return true
}
if strings.HasPrefix(topic, "$queue/") {
topic = string([]byte(topic)[7:])
if topic == "" {
return false
func (b *Broker) CheckTopicAuth(action, clientID, username, ip, topic string) bool {
if b.auth != nil {
if strings.HasPrefix(topic, "$SYS/broker/connection/clients/") {
return true
}
if strings.HasPrefix(topic, "$share/") && action == SUB {
substr := groupCompile.FindStringSubmatch(topic)
if len(substr) != 3 {
return false
}
topic = substr[2]
}
return b.auth.CheckACL(action, clientID, username, ip, topic)
}
ip := c.info.remoteIP
username := string(c.info.username)
clientid := string(c.info.clientID)
aclInfo := c.broker.AclConfig
return acl.CheckTopicAuth(aclInfo, typ, ip, username, clientid, topic)
return true
}
var (
watchList = []string{"./conf"}
)
func (b *Broker) handleFsEvent(event fsnotify.Event) error {
switch event.Name {
case b.config.AclConf:
if event.Op&fsnotify.Write == fsnotify.Write ||
event.Op&fsnotify.Create == fsnotify.Create {
log.Info("text:handling acl config change event:", event)
aclconfig, err := acl.AclConfigLoad(event.Name)
if err != nil {
log.Error("aclconfig change failed, load acl conf error: ", err)
return err
}
b.AclConfig = aclconfig
}
func (b *Broker) CheckConnectAuth(clientID, username, password string) bool {
if b.auth != nil {
return b.auth.CheckConnect(clientID, username, password)
}
return nil
}
func (b *Broker) StartAclWatcher() {
go func() {
wch, e := fsnotify.NewWatcher()
if e != nil {
log.Error("start monitor acl config file error,", e)
return
}
defer wch.Close()
return true
for _, i := range watchList {
if err := wch.Add(i); err != nil {
log.Error("start monitor acl config file error,", err)
return
}
}
log.Info("watching acl config file change...")
for {
select {
case evt := <-wch.Events:
b.handleFsEvent(evt)
case err := <-wch.Errors:
log.Error("error:", err.Error())
}
}
}()
}

15
broker/bridge.go Normal file
View File

@@ -0,0 +1,15 @@
package broker
import (
"github.com/fhmq/hmq/plugins/bridge"
"go.uber.org/zap"
)
func (b *Broker) Publish(e *bridge.Elements) {
if b.bridgeMQ != nil {
err := b.bridgeMQ.Publish(e)
if err != nil {
log.Error("send message to mq error.", zap.Error(err))
}
}
}

View File

@@ -2,71 +2,126 @@ package broker
import (
"crypto/tls"
"hmq/lib/acl"
"fmt"
"net"
"net/http"
"runtime/debug"
"sync"
"sync/atomic"
"time"
"github.com/fhmq/hmq/plugins/bridge"
"github.com/fhmq/hmq/plugins/auth"
"github.com/fhmq/hmq/broker/lib/sessions"
"github.com/fhmq/hmq/broker/lib/topics"
"github.com/eclipse/paho.mqtt.golang/packets"
"github.com/shirou/gopsutil/mem"
"github.com/fhmq/hmq/pool"
"go.uber.org/zap"
"golang.org/x/net/websocket"
log "github.com/cihub/seelog"
)
const (
MessagePoolNum = 1024
MessagePoolMessageNum = 1024
)
type Message struct {
client *client
packet packets.ControlPacket
}
type Broker struct {
id string
cid uint64
mu sync.Mutex
config *Config
tlsConfig *tls.Config
AclConfig *acl.ACLConfig
clients sync.Map
routes sync.Map
remotes sync.Map
sl *Sublist
rl *RetainList
queues map[string]int
id string
mu sync.Mutex
config *Config
tlsConfig *tls.Config
wpool *pool.WorkerPool
clients sync.Map
routes sync.Map
remotes sync.Map
nodes map[string]interface{}
clusterPool chan *Message
topicsMgr *topics.Manager
sessionMgr *sessions.Manager
auth auth.Auth
bridgeMQ bridge.BridgeMQ
}
func newMessagePool() []chan *Message {
pool := make([]chan *Message, 0)
for i := 0; i < MessagePoolNum; i++ {
ch := make(chan *Message, MessagePoolMessageNum)
pool = append(pool, ch)
}
return pool
}
func NewBroker(config *Config) (*Broker, error) {
b := &Broker{
id: GenUniqueId(),
config: config,
sl: NewSublist(),
rl: NewRetainList(),
queues: make(map[string]int),
if config == nil {
config = DefaultConfig
}
b := &Broker{
id: GenUniqueId(),
config: config,
wpool: pool.New(config.Worker),
nodes: make(map[string]interface{}),
clusterPool: make(chan *Message),
}
var err error
b.topicsMgr, err = topics.NewManager("mem")
if err != nil {
log.Error("new topic manager error", zap.Error(err))
return nil, err
}
b.sessionMgr, err = sessions.NewManager("mem")
if err != nil {
log.Error("new session manager error", zap.Error(err))
return nil, err
}
if b.config.TlsPort != "" {
tlsconfig, err := NewTLSConfig(b.config.TlsInfo)
if err != nil {
log.Error("new tlsConfig error: ", err)
log.Error("new tlsConfig error", zap.Error(err))
return nil, err
}
b.tlsConfig = tlsconfig
}
if b.config.Acl {
aclconfig, err := acl.AclConfigLoad(b.config.AclConf)
if err != nil {
log.Error("Load acl conf error: ", err)
return nil, err
}
b.AclConfig = aclconfig
b.StartAclWatcher()
}
b.auth = auth.NewAuth(b.config.Plugin.Auth)
b.bridgeMQ = bridge.NewBridgeMQ(b.config.Plugin.Bridge)
return b, nil
}
func (b *Broker) SubmitWork(clientId string, msg *Message) {
if b.wpool == nil {
b.wpool = pool.New(b.config.Worker)
}
if msg.client.typ == CLUSTER {
b.clusterPool <- msg
} else {
b.wpool.Submit(clientId, func() {
ProcessMessage(msg)
})
}
}
func (b *Broker) Start() {
if b == nil {
log.Error("broker is null")
return
}
StartDispatcher()
if b.config.HTTPPort != "" {
go InitHTTPMoniter(b)
}
//listen client over tcp
if b.config.Port != "" {
@@ -89,32 +144,17 @@ func (b *Broker) Start() {
}
//connect on other node in cluster
if len(b.config.Cluster.Routes) > 0 {
b.ConnectToRouters()
if b.config.Router != "" {
go b.processClusterInfo()
b.ConnectToDiscovery()
}
//system montior
go StateMonitor()
}
func StateMonitor() {
v, _ := mem.VirtualMemory()
timeSticker := time.NewTicker(time.Second * 30)
for {
select {
case <-timeSticker.C:
if v.UsedPercent > 75 {
debug.FreeOSMemory()
}
}
}
}
func (b *Broker) StartWebsocketListening() {
path := b.config.WsPath
hp := ":" + b.config.WsPort
log.Info("Start Webscoker Listening on ", hp, path)
log.Info("Start Websocket Listener on:", zap.String("hp", hp), zap.String("path", path))
http.Handle(path, websocket.Handler(b.wsHandler))
var err error
if b.config.WsTLS {
@@ -123,16 +163,15 @@ func (b *Broker) StartWebsocketListening() {
err = http.ListenAndServe(hp, nil)
}
if err != nil {
log.Error("ListenAndServe: " + err.Error())
log.Error("ListenAndServe:" + err.Error())
return
}
}
func (b *Broker) wsHandler(ws *websocket.Conn) {
// io.Copy(ws, ws)
atomic.AddUint64(&b.cid, 1)
ws.PayloadType = websocket.BinaryFrame
b.handleConnection(CLIENT, ws, b.cid)
b.handleConnection(CLIENT, ws)
}
func (b *Broker) StartClientListening(Tls bool) {
@@ -142,14 +181,14 @@ func (b *Broker) StartClientListening(Tls bool) {
if Tls {
hp = b.config.TlsHost + ":" + b.config.TlsPort
l, err = tls.Listen("tcp", hp, b.tlsConfig)
log.Info("Start TLS Listening client on ", hp)
log.Info("Start TLS Listening client on ", zap.String("hp", hp))
} else {
hp := b.config.Host + ":" + b.config.Port
l, err = net.Listen("tcp", hp)
log.Info("Start Listening client on ", hp)
log.Info("Start Listening client on ", zap.String("hp", hp))
}
if err != nil {
log.Error("Error listening on ", err)
log.Error("Error listening on ", zap.Error(err))
return
}
tmpDelay := 10 * ACCEPT_MIN_SLEEP
@@ -158,92 +197,61 @@ func (b *Broker) StartClientListening(Tls bool) {
if err != nil {
if ne, ok := err.(net.Error); ok && ne.Temporary() {
log.Error("Temporary Client Accept Error(%v), sleeping %dms",
ne, tmpDelay/time.Millisecond)
zap.Error(ne), zap.Duration("sleeping", tmpDelay/time.Millisecond))
time.Sleep(tmpDelay)
tmpDelay *= 2
if tmpDelay > ACCEPT_MAX_SLEEP {
tmpDelay = ACCEPT_MAX_SLEEP
}
} else {
log.Error("Accept error: %v", err)
log.Error("Accept error: %v", zap.Error(err))
}
continue
}
tmpDelay = ACCEPT_MIN_SLEEP
atomic.AddUint64(&b.cid, 1)
go b.handleConnection(CLIENT, conn, b.cid)
go b.handleConnection(CLIENT, conn)
}
}
func (b *Broker) Handshake(conn net.Conn) bool {
nc := tls.Server(conn, b.tlsConfig)
time.AfterFunc(DEFAULT_TLS_TIMEOUT, func() { TlsTimeout(nc) })
nc.SetReadDeadline(time.Now().Add(DEFAULT_TLS_TIMEOUT))
// Force handshake
if err := nc.Handshake(); err != nil {
log.Error("TLS handshake error, ", err)
return false
}
nc.SetReadDeadline(time.Time{})
return true
}
func TlsTimeout(conn *tls.Conn) {
nc := conn
// Check if already closed
if nc == nil {
return
}
cs := nc.ConnectionState()
if !cs.HandshakeComplete {
log.Error("TLS handshake timeout")
nc.Close()
}
}
func (b *Broker) StartClusterListening() {
var hp string = b.config.Cluster.Host + ":" + b.config.Cluster.Port
log.Info("Start Listening cluster on ", hp)
log.Info("Start Listening cluster on ", zap.String("hp", hp))
l, e := net.Listen("tcp", hp)
if e != nil {
log.Error("Error listening on ", e)
log.Error("Error listening on ", zap.Error(e))
return
}
var idx uint64 = 0
tmpDelay := 10 * ACCEPT_MIN_SLEEP
for {
conn, err := l.Accept()
if err != nil {
if ne, ok := err.(net.Error); ok && ne.Temporary() {
log.Error("Temporary Client Accept Error(%v), sleeping %dms",
ne, tmpDelay/time.Millisecond)
zap.Error(ne), zap.Duration("sleeping", tmpDelay/time.Millisecond))
time.Sleep(tmpDelay)
tmpDelay *= 2
if tmpDelay > ACCEPT_MAX_SLEEP {
tmpDelay = ACCEPT_MAX_SLEEP
}
} else {
log.Error("Accept error: %v", err)
log.Error("Accept error: %v", zap.Error(err))
}
continue
}
tmpDelay = ACCEPT_MIN_SLEEP
go b.handleConnection(ROUTER, conn, idx)
go b.handleConnection(ROUTER, conn)
}
}
func (b *Broker) handleConnection(typ int, conn net.Conn, idx uint64) {
func (b *Broker) handleConnection(typ int, conn net.Conn) {
//process connect packet
packet, err := packets.ReadPacket(conn)
if err != nil {
log.Error("read connect packet error: ", err)
log.Error("read connect packet error: ", zap.Error(err))
return
}
if packet == nil {
@@ -255,12 +263,35 @@ func (b *Broker) handleConnection(typ int, conn net.Conn, idx uint64) {
log.Error("received msg that was not Connect")
return
}
log.Info("read connect from ", zap.String("clientID", msg.ClientIdentifier))
connack := packets.NewControlPacket(packets.Connack).(*packets.ConnackPacket)
connack.ReturnCode = packets.Accepted
connack.SessionPresent = msg.CleanSession
connack.ReturnCode = msg.Validate()
if connack.ReturnCode != packets.Accepted {
err = connack.Write(conn)
if err != nil {
log.Error("send connack error, ", zap.Error(err), zap.String("clientID", msg.ClientIdentifier))
return
}
return
}
if typ == CLIENT && !b.CheckConnectAuth(string(msg.ClientIdentifier), string(msg.Username), string(msg.Password)) {
connack.ReturnCode = packets.ErrRefusedNotAuthorised
err = connack.Write(conn)
if err != nil {
log.Error("send connack error, ", zap.Error(err), zap.String("clientID", msg.ClientIdentifier))
return
}
return
}
err = connack.Write(conn)
if err != nil {
log.Error("send connack error, ", err, " clientID = ", msg.ClientIdentifier)
log.Error("send connack error, ", zap.Error(err), zap.String("clientID", msg.ClientIdentifier))
return
}
@@ -291,64 +322,128 @@ func (b *Broker) handleConnection(typ int, conn net.Conn, idx uint64) {
c.init()
err = b.getSession(c, msg, connack)
if err != nil {
log.Error("get session error: ", zap.String("clientID", c.info.clientID))
return
}
cid := c.info.clientID
var msgPool *MessagePool
var exist bool
var old interface{}
switch typ {
case CLIENT:
msgPool = MSGPool[idx%MessagePoolNum].GetPool()
c.mp = msgPool
old, exist = b.clients.Load(cid)
if exist {
log.Warn("client exist, close old...", " clientID = ", c.info.clientID)
log.Warn("client exist, close old...", zap.String("clientID", c.info.clientID))
ol, ok := old.(*client)
if ok {
msg := &Message{client: c, packet: DisconnectdPacket}
ol.mp.queue <- msg
ol.Close()
}
}
b.clients.Store(cid, c)
b.OnlineOfflineNotification(cid, true)
{
b.Publish(&bridge.Elements{
ClientID: string(msg.ClientIdentifier),
Username: string(msg.Username),
Action: bridge.Connect,
Timestamp: time.Now().Unix(),
})
}
case ROUTER:
msgPool = MSGPool[(MessagePoolNum + idx)].GetPool()
c.mp = msgPool
old, exist = b.routes.Load(cid)
if exist {
log.Warn("router exist, close old...")
ol, ok := old.(*client)
if ok {
msg := &Message{client: c, packet: DisconnectdPacket}
ol.mp.queue <- msg
ol.Close()
}
}
b.routes.Store(cid, c)
}
c.readLoop()
}
func (b *Broker) ConnectToDiscovery() {
var conn net.Conn
var err error
var tempDelay time.Duration = 0
for {
conn, err = net.Dial("tcp", b.config.Router)
if err != nil {
log.Error("Error trying to connect to route: ", zap.Error(err))
log.Debug("Connect to route timeout ,retry...")
if 0 == tempDelay {
tempDelay = 1 * time.Second
} else {
tempDelay *= 2
}
if max := 20 * time.Second; tempDelay > max {
tempDelay = max
}
time.Sleep(tempDelay)
continue
}
break
}
log.Debug("connect to router success :", zap.String("Router", b.config.Router))
cid := b.id
info := info{
clientID: cid,
keepalive: 60,
}
c := &client{
typ: CLUSTER,
broker: b,
conn: conn,
info: info,
}
c.init()
c.SendConnect()
c.SendInfo()
go c.readLoop()
if typ == ROUTER {
c.SendInfo()
c.StartPing()
}
go c.StartPing()
}
func (b *Broker) ConnectToRouters() {
for _, v := range b.config.Cluster.Routes {
go b.connectRouter(v, "")
// processClusterInfo drains the broker's cluster channel, dispatching
// every received message until the channel is closed.
func (b *Broker) processClusterInfo() {
	for msg := range b.clusterPool {
		ProcessMessage(msg)
	}
	// range exits only when the channel is closed — the cluster feed
	// has shut down.
	log.Error("read message from cluster channel error")
}
func (b *Broker) connectRouter(url, remoteID string) {
func (b *Broker) connectRouter(id, addr string) {
var conn net.Conn
var err error
var timeDelay time.Duration = 0
retryTimes := 0
max := 32 * time.Second
for {
conn, err = net.Dial("tcp", url)
if !b.checkNodeExist(id, addr) {
return
}
conn, err = net.Dial("tcp", addr)
if err != nil {
log.Error("Error trying to connect to route: ", err)
log.Error("Error trying to connect to route: ", zap.Error(err))
if retryTimes > 50 {
return
@@ -372,8 +467,8 @@ func (b *Broker) connectRouter(url, remoteID string) {
break
}
route := route{
remoteID: remoteID,
remoteUrl: conn.RemoteAddr().String(),
remoteID: id,
remoteUrl: addr,
}
cid := GenUniqueId()
@@ -392,16 +487,36 @@ func (b *Broker) connectRouter(url, remoteID string) {
c.init()
b.remotes.Store(cid, c)
c.mp = MSGPool[(MessagePoolNum + 1)].GetPool()
c.SendConnect()
c.SendInfo()
// mpool := b.messagePool[fnv1a.HashString64(cid)%MessagePoolNum]
go c.readLoop()
go c.StartPing()
}
// checkNodeExist reports whether a cluster node with the given id, or
// one already registered at the given url, is known to this broker.
// The broker's own id always yields false (never route to self).
func (b *Broker) checkNodeExist(id, url string) bool {
	if id == b.id {
		return false
	}
	for nodeID, nodeAddr := range b.nodes {
		if nodeID == id {
			return true
		}
		// Values are expected to be string addresses; anything else is
		// skipped, matching the original tolerant behavior.
		if addr, ok := nodeAddr.(string); ok && addr == url {
			return true
		}
	}
	return false
}
func (b *Broker) CheckRemoteExist(remoteID, url string) bool {
exist := false
b.remotes.Range(func(key, value interface{}) bool {
@@ -423,9 +538,9 @@ func (b *Broker) SendLocalSubsToRouter(c *client) {
b.clients.Range(func(key, value interface{}) bool {
client, ok := value.(*client)
if ok {
subs := client.subs
subs := client.subMap
for _, sub := range subs {
subInfo.Topics = append(subInfo.Topics, string(sub.topic))
subInfo.Topics = append(subInfo.Topics, sub.topic)
subInfo.Qoss = append(subInfo.Qoss, sub.qos)
}
}
@@ -434,7 +549,7 @@ func (b *Broker) SendLocalSubsToRouter(c *client) {
if len(subInfo.Topics) > 0 {
err := c.WriterPacket(subInfo)
if err != nil {
log.Error("Send localsubs To Router error :", err)
log.Error("Send localsubs To Router error :", zap.Error(err))
}
}
}
@@ -481,17 +596,22 @@ func (b *Broker) removeClient(c *client) {
}
func (b *Broker) PublishMessage(packet *packets.PublishPacket) {
topic := packet.TopicName
r := b.sl.Match(topic)
if len(r.psubs) == 0 {
var subs []interface{}
var qoss []byte
b.mu.Lock()
err := b.topicsMgr.Subscribers([]byte(packet.TopicName), packet.Qos, &subs, &qoss)
b.mu.Unlock()
if err != nil {
log.Error("search sub client error, ", zap.Error(err))
return
}
for _, sub := range r.psubs {
if sub != nil {
err := sub.client.WriterPacket(packet)
for _, sub := range subs {
s, ok := sub.(*subscription)
if ok {
err := s.client.WriterPacket(packet)
if err != nil {
log.Error("process message for psub error, ", err)
log.Error("write message error, ", zap.Error(err))
}
}
}
@@ -508,3 +628,12 @@ func (b *Broker) BroadcastUnSubscribe(subs map[string]*subscription) {
b.BroadcastSubOrUnsubMessage(unsub)
}
}
// OnlineOfflineNotification publishes a QoS-0 $SYS message announcing
// that clientID went online or offline, with an RFC3339 UTC timestamp.
func (b *Broker) OnlineOfflineNotification(clientID string, online bool) {
	notice := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
	notice.TopicName = "$SYS/broker/connection/clients/" + clientID
	notice.Qos = 0
	body := fmt.Sprintf(`{"clientID":"%s","online":%v,"timestamp":"%s"}`, clientID, online, time.Now().UTC().Format(time.RFC3339))
	notice.Payload = []byte(body)
	b.PublishMessage(notice)
}

View File

@@ -1,56 +1,74 @@
package broker
import (
"context"
"errors"
"math/rand"
"net"
"reflect"
"regexp"
"strings"
"sync"
"time"
"github.com/eclipse/paho.mqtt.golang/packets"
"github.com/fhmq/hmq/broker/lib/sessions"
"github.com/fhmq/hmq/broker/lib/topics"
"github.com/fhmq/hmq/plugins/bridge"
log "github.com/cihub/seelog"
"github.com/eclipse/paho.mqtt.golang/packets"
"go.uber.org/zap"
)
const (
// special pub topic for cluster info BrokerInfoTopic
BrokerInfoTopic = "broker001info/brokerinfo"
BrokerInfoTopic = "broker000100101info"
// CLIENT is an end user.
CLIENT = 0
// ROUTER is another router in the cluster.
ROUTER = 1
//REMOTE is the router connect to other cluster
REMOTE = 2
REMOTE = 2
CLUSTER = 3
)
const (
_GroupTopicRegexp = `^\$share/([0-9a-zA-Z_-]+)/(.*)$`
)
const (
Connected = 1
Disconnected = 2
)
type client struct {
typ int
mu sync.Mutex
broker *Broker
conn net.Conn
info info
route route
status int
closed chan int
smu sync.RWMutex
mp *MessagePool
subs map[string]*subscription
rsubs map[string]*subInfo
}
var (
groupCompile = regexp.MustCompile(_GroupTopicRegexp)
)
type subInfo struct {
sub *subscription
num int
type client struct {
typ int
mu sync.Mutex
broker *Broker
conn net.Conn
info info
route route
status int
ctx context.Context
cancelFunc context.CancelFunc
session *sessions.Session
subMap map[string]*subscription
topicsMgr *topics.Manager
subs []interface{}
qoss []byte
rmsgs []*packets.PublishPacket
routeSubMap map[string]uint64
}
type subscription struct {
client *client
topic string
qos byte
queue bool
client *client
topic string
qos byte
share bool
groupName string
}
type info struct {
@@ -70,72 +88,63 @@ type route struct {
var (
DisconnectdPacket = packets.NewControlPacket(packets.Disconnect).(*packets.DisconnectPacket)
r = rand.New(rand.NewSource(time.Now().UnixNano()))
)
func (c *client) init() {
c.smu.Lock()
defer c.smu.Unlock()
c.status = Connected
c.closed = make(chan int, 1)
c.rsubs = make(map[string]*subInfo)
c.subs = make(map[string]*subscription, 10)
c.info.localIP = strings.Split(c.conn.LocalAddr().String(), ":")[0]
c.info.remoteIP = strings.Split(c.conn.RemoteAddr().String(), ":")[0]
}
func (c *client) keepAlive(ch chan int) {
defer close(ch)
keepalive := time.Duration(c.info.keepalive*3/2) * time.Second
timer := time.NewTimer(keepalive)
msgPool := c.mp
for {
select {
case <-ch:
timer.Reset(keepalive)
case <-timer.C:
log.Error("Client exceeded timeout, disconnecting. clientID = ", c.info.clientID, " keepalive = ", c.info.keepalive)
msg := &Message{client: c, packet: DisconnectdPacket}
msgPool.queue <- msg
timer.Stop()
return
case _, ok := <-c.closed:
if !ok {
return
}
}
}
c.info.localIP, _, _ = net.SplitHostPort(c.conn.LocalAddr().String())
c.info.remoteIP, _, _ = net.SplitHostPort(c.conn.RemoteAddr().String())
c.ctx, c.cancelFunc = context.WithCancel(context.Background())
c.subMap = make(map[string]*subscription)
c.topicsMgr = c.broker.topicsMgr
}
func (c *client) readLoop() {
nc := c.conn
msgPool := c.mp
if nc == nil || msgPool == nil {
b := c.broker
if nc == nil || b == nil {
return
}
ch := make(chan int, 1000)
go c.keepAlive(ch)
keepAlive := time.Second * time.Duration(c.info.keepalive)
timeOut := keepAlive + (keepAlive / 2)
for {
packet, err := packets.ReadPacket(nc)
if err != nil {
log.Error("read packet error: ", err, " clientID = ", c.info.clientID)
break
}
select {
case <-c.ctx.Done():
return
default:
//add read timeout
if err := nc.SetReadDeadline(time.Now().Add(timeOut)); err != nil {
log.Error("set read timeout error: ", zap.Error(err), zap.String("ClientID", c.info.clientID))
msg := &Message{
client: c,
packet: DisconnectdPacket,
}
b.SubmitWork(c.info.clientID, msg)
return
}
ch <- 1
packet, err := packets.ReadPacket(nc)
if err != nil {
log.Error("read packet error: ", zap.Error(err), zap.String("ClientID", c.info.clientID))
msg := &Message{
client: c,
packet: DisconnectdPacket,
}
b.SubmitWork(c.info.clientID, msg)
return
}
msg := &Message{
client: c,
packet: packet,
msg := &Message{
client: c,
packet: packet,
}
b.SubmitWork(c.info.clientID, msg)
}
msgPool.queue <- msg
}
msg := &Message{client: c, packet: DisconnectdPacket}
msgPool.queue <- msg
msgPool.Reduce()
}
func ProcessMessage(msg *Message) {
@@ -144,10 +153,13 @@ func ProcessMessage(msg *Message) {
if ca == nil {
return
}
log.Debug("Recv message: ", ca.String(), " clientID = ", c.info.clientID)
if c.typ == CLIENT {
log.Debug("Recv message:", zap.String("message type", reflect.TypeOf(msg.packet).String()[9:]), zap.String("ClientID", c.info.clientID))
}
switch ca.(type) {
case *packets.ConnackPacket:
case *packets.ConnectPacket:
case *packets.PublishPacket:
packet := ca.(*packets.PublishPacket)
@@ -170,23 +182,37 @@ func ProcessMessage(msg *Message) {
case *packets.DisconnectPacket:
c.Close()
default:
log.Info("Recv Unknow message.......", " clientID = ", c.info.clientID)
log.Info("Recv Unknow message.......", zap.String("ClientID", c.info.clientID))
}
}
// ProcessPublish routes an inbound PUBLISH packet to the handler that
// matches this connection's role (end client, cluster router, or
// remote-cluster link). Unknown roles are silently ignored.
func (c *client) ProcessPublish(packet *packets.PublishPacket) {
	switch c.typ {
	case CLUSTER:
		c.processRemotePublish(packet)
	case ROUTER:
		c.processRouterPublish(packet)
	case CLIENT:
		c.processClientPublish(packet)
	}
}
func (c *client) processRemotePublish(packet *packets.PublishPacket) {
if c.status == Disconnected {
return
}
topic := packet.TopicName
if topic == BrokerInfoTopic && c.typ != CLIENT {
if topic == BrokerInfoTopic {
c.ProcessInfo(packet)
return
}
if !c.CheckTopicAuth(PUB, topic) {
log.Error("Pub Topics Auth failed, ", topic, " clientID = ", c.info.clientID)
}
func (c *client) processRouterPublish(packet *packets.PublishPacket) {
if c.status == Disconnected {
return
}
@@ -197,115 +223,120 @@ func (c *client) ProcessPublish(packet *packets.PublishPacket) {
puback := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
puback.MessageID = packet.MessageID
if err := c.WriterPacket(puback); err != nil {
log.Error("send puback error, ", err, " clientID = ", c.info.clientID)
log.Error("send puback error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
return
}
c.ProcessPublishMessage(packet)
case QosExactlyOnce:
return
default:
log.Error("publish with unknown qos", " clientID = ", c.info.clientID)
log.Error("publish with unknown qos", zap.String("ClientID", c.info.clientID))
return
}
if packet.Retain {
if b := c.broker; b != nil {
err := b.rl.Insert(topic, packet)
if err != nil {
log.Error("Insert Retain Message error: ", err, " clientID = ", c.info.clientID)
}
}
func (c *client) processClientPublish(packet *packets.PublishPacket) {
topic := packet.TopicName
if !c.broker.CheckTopicAuth(PUB, c.info.clientID, c.info.username, c.info.remoteIP, topic) {
log.Error("Pub Topics Auth failed, ", zap.String("topic", topic), zap.String("ClientID", c.info.clientID))
return
}
//publish kafka
c.broker.Publish(&bridge.Elements{
ClientID: c.info.clientID,
Username: c.info.username,
Action: bridge.Publish,
Timestamp: time.Now().Unix(),
Payload: string(packet.Payload),
Topic: topic,
})
switch packet.Qos {
case QosAtMostOnce:
c.ProcessPublishMessage(packet)
case QosAtLeastOnce:
puback := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
puback.MessageID = packet.MessageID
if err := c.WriterPacket(puback); err != nil {
log.Error("send puback error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
return
}
c.ProcessPublishMessage(packet)
case QosExactlyOnce:
return
default:
log.Error("publish with unknown qos", zap.String("ClientID", c.info.clientID))
return
}
}
func (c *client) ProcessPublishMessage(packet *packets.PublishPacket) {
if c.status == Disconnected {
return
}
b := c.broker
if b == nil {
return
}
typ := c.typ
topic := packet.TopicName
r := b.sl.Match(topic)
// log.Info("psubs num: ", len(r.psubs))
if len(r.qsubs) == 0 && len(r.psubs) == 0 {
if packet.Retain {
if err := c.topicsMgr.Retain(packet); err != nil {
log.Error("Error retaining message: ", zap.Error(err), zap.String("ClientID", c.info.clientID))
}
}
err := c.topicsMgr.Subscribers([]byte(packet.TopicName), packet.Qos, &c.subs, &c.qoss)
if err != nil {
log.Error("Error retrieving subscribers list: ", zap.String("ClientID", c.info.clientID))
return
}
for _, sub := range r.psubs {
if sub.client.typ == REMOTE {
if typ == REMOTE {
continue
}
}
if sub != nil {
err := sub.client.WriterPacket(packet)
if err != nil {
log.Error("process message for psub error, ", err, " clientID = ", c.info.clientID)
}
}
// fmt.Println("psubs num: ", len(c.subs))
if len(c.subs) == 0 {
return
}
pre := -1
now := -1
t := "$queue/" + topic
cnt, exist := b.queues[t]
if exist {
// log.Info("queue index : ", cnt)
for _, sub := range r.qsubs {
if sub.client.typ == REMOTE {
if c.typ == REMOTE {
var qsub []int
for i, sub := range c.subs {
s, ok := sub.(*subscription)
if ok {
if s.client.typ == ROUTER {
if typ != CLIENT {
continue
}
}
if c.typ == CLIENT {
now = now + 1
if s.share {
qsub = append(qsub, i)
} else {
now = now + sub.client.rsubs[t].num
publish(s, packet)
}
if cnt > pre && cnt <= now {
if sub != nil {
err := sub.client.WriterPacket(packet)
if err != nil {
log.Error("send publish error, ", err, " clientID = ", c.info.clientID)
}
}
break
}
pre = now
}
}
length := getQueueSubscribeNum(r.qsubs)
if length > 0 {
b.queues[t] = (b.queues[t] + 1) % length
if len(qsub) > 0 {
idx := r.Intn(len(qsub))
sub := c.subs[qsub[idx]].(*subscription)
publish(sub, packet)
}
}
// getQueueSubscribeNum counts the effective number of queue
// subscribers in qsubs: local CLIENT connections count as one each,
// while non-client (remote) connections contribute the subscriber
// count they reported for the "$queue/<topic>" subscription.
func getQueueSubscribeNum(qsubs []*subscription) int {
	if len(qsubs) == 0 {
		return 0
	}
	queueTopic := "$queue/" + qsubs[0].topic
	total := 0
	for _, sub := range qsubs {
		if sub.client.typ == CLIENT {
			total++
		} else {
			// NOTE(review): rsubs[queueTopic] yields a nil *subInfo for
			// an unregistered topic, so .num would panic — assumes
			// remote subs are always registered first; TODO confirm.
			total += sub.client.rsubs[queueTopic].num
		}
	}
	return total
}
func (c *client) ProcessSubscribe(packet *packets.SubscribePacket) {
switch c.typ {
case CLIENT:
c.processClientSubscribe(packet)
case ROUTER:
c.processRouterSubscribe(packet)
}
}
func (c *client) processClientSubscribe(packet *packets.SubscribePacket) {
if c.status == Disconnected {
return
}
@@ -324,85 +355,151 @@ func (c *client) ProcessSubscribe(packet *packets.SubscribePacket) {
for i, topic := range topics {
t := topic
//check topic auth for client
if !c.CheckTopicAuth(SUB, topic) {
log.Error("Sub topic Auth failed: ", topic, " clientID = ", c.info.clientID)
if !b.CheckTopicAuth(SUB, c.info.clientID, c.info.username, c.info.remoteIP, topic) {
log.Error("Sub topic Auth failed: ", zap.String("topic", topic), zap.String("ClientID", c.info.clientID))
retcodes = append(retcodes, QosFailure)
continue
}
queue := strings.HasPrefix(topic, "$queue/")
if queue {
if len(t) > 7 {
t = t[7:]
if _, exists := b.queues[topic]; !exists {
b.queues[topic] = 0
}
} else {
b.Publish(&bridge.Elements{
ClientID: c.info.clientID,
Username: c.info.username,
Action: bridge.Subscribe,
Timestamp: time.Now().Unix(),
Topic: topic,
})
groupName := ""
share := false
if strings.HasPrefix(topic, "$share/") {
substr := groupCompile.FindStringSubmatch(topic)
if len(substr) != 3 {
retcodes = append(retcodes, QosFailure)
continue
}
share = true
groupName = substr[1]
topic = substr[2]
}
if oldSub, exist := c.subMap[t]; exist {
c.topicsMgr.Unsubscribe([]byte(oldSub.topic), oldSub)
delete(c.subMap, t)
}
sub := &subscription{
topic: t,
qos: qoss[i],
client: c,
queue: queue,
topic: topic,
qos: qoss[i],
client: c,
share: share,
groupName: groupName,
}
switch c.typ {
case CLIENT:
if _, exist := c.subs[topic]; !exist {
c.subs[topic] = sub
} else {
//if exist ,check whether qos change
c.subs[topic].qos = qoss[i]
retcodes = append(retcodes, qoss[i])
continue
}
case REMOTE:
if subinfo, exist := c.rsubs[topic]; !exist {
sinfo := &subInfo{sub: sub, num: 1}
c.rsubs[topic] = sinfo
} else {
subinfo.num = subinfo.num + 1
retcodes = append(retcodes, qoss[i])
continue
}
}
err := b.sl.Insert(sub)
rqos, err := c.topicsMgr.Subscribe([]byte(topic), qoss[i], sub)
if err != nil {
log.Error("Insert subscription error: ", err, " clientID = ", c.info.clientID)
log.Error("subscribe error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
retcodes = append(retcodes, QosFailure)
} else {
retcodes = append(retcodes, qoss[i])
continue
}
c.subMap[t] = sub
c.session.AddTopic(t, qoss[i])
retcodes = append(retcodes, rqos)
c.topicsMgr.Retained([]byte(topic), &c.rmsgs)
}
suback.ReturnCodes = retcodes
err := c.WriterPacket(suback)
if err != nil {
log.Error("send suback error, ", err, " clientID = ", c.info.clientID)
log.Error("send suback error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
return
}
//broadcast subscribe message
if c.typ == CLIENT {
go b.BroadcastSubOrUnsubMessage(packet)
}
go b.BroadcastSubOrUnsubMessage(packet)
//process retain message
for _, t := range topics {
packets := b.rl.Match(t)
for _, packet := range packets {
log.Info("process retain message: ", packet, " clientID = ", c.info.clientID)
if packet != nil {
c.WriterPacket(packet)
}
for _, rm := range c.rmsgs {
if err := c.WriterPacket(rm); err != nil {
log.Error("Error publishing retained message:", zap.Any("err", err), zap.String("ClientID", c.info.clientID))
} else {
log.Info("process retain message: ", zap.Any("packet", packet), zap.String("ClientID", c.info.clientID))
}
}
}
// processRouterSubscribe handles a SUBSCRIBE packet arriving from a
// peer router: it registers each topic with the topic manager,
// ref-counts it in routeSubMap, and replies with a SUBACK carrying one
// return code per requested topic (QosFailure for rejected entries).
func (c *client) processRouterSubscribe(packet *packets.SubscribePacket) {
	if c.status == Disconnected {
		return
	}
	b := c.broker
	if b == nil {
		return
	}
	topics := packet.Topics
	qoss := packet.Qoss
	suback := packets.NewControlPacket(packets.Suback).(*packets.SubackPacket)
	suback.MessageID = packet.MessageID
	var retcodes []byte
	for i, topic := range topics {
		// t keeps the original topic filter (including any "$share/"
		// prefix); it is the key used in subMap below.
		t := topic
		groupName := ""
		share := false
		if strings.HasPrefix(topic, "$share/") {
			// "$share/<group>/<topic>" — extract group and real topic;
			// a malformed filter is rejected with QosFailure.
			substr := groupCompile.FindStringSubmatch(topic)
			if len(substr) != 3 {
				retcodes = append(retcodes, QosFailure)
				continue
			}
			share = true
			groupName = substr[1]
			topic = substr[2]
		}
		sub := &subscription{
			topic:     topic,
			qos:       qoss[i],
			client:    c,
			share:     share,
			groupName: groupName,
		}
		// The topic manager is given the stripped topic; rqos is the
		// QoS it actually granted.
		rqos, err := c.topicsMgr.Subscribe([]byte(topic), qoss[i], sub)
		if err != nil {
			log.Error("subscribe error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
			retcodes = append(retcodes, QosFailure)
			continue
		}
		c.subMap[t] = sub
		// Count how many remote clients stand behind this route's
		// subscription so unsubscribe can be deferred until the last.
		addSubMap(c.routeSubMap, topic)
		retcodes = append(retcodes, rqos)
	}
	suback.ReturnCodes = retcodes
	err := c.WriterPacket(suback)
	if err != nil {
		log.Error("send suback error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
		return
	}
}
// ProcessUnSubscribe dispatches an UNSUBSCRIBE packet to the handler
// for this connection's role; other connection types are ignored.
func (c *client) ProcessUnSubscribe(packet *packets.UnsubscribePacket) {
	switch c.typ {
	case ROUTER:
		c.processRouterUnSubscribe(packet)
	case CLIENT:
		c.processClientUnSubscribe(packet)
	}
}
func (c *client) processRouterUnSubscribe(packet *packets.UnsubscribePacket) {
if c.status == Disconnected {
return
}
@@ -410,28 +507,18 @@ func (c *client) ProcessUnSubscribe(packet *packets.UnsubscribePacket) {
if b == nil {
return
}
typ := c.typ
topics := packet.Topics
for _, t := range topics {
for _, topic := range topics {
sub, exist := c.subMap[topic]
if exist {
retainNum := delSubMap(c.routeSubMap, topic)
if retainNum > 0 {
continue
}
switch typ {
case CLIENT:
sub, ok := c.subs[t]
if ok {
c.unsubscribe(sub)
}
case REMOTE:
subinfo, ok := c.rsubs[t]
if ok {
subinfo.num = subinfo.num - 1
if subinfo.num < 1 {
delete(c.rsubs, t)
c.unsubscribe(subinfo.sub)
} else {
c.rsubs[t] = subinfo
}
}
c.topicsMgr.Unsubscribe([]byte(sub.topic), sub)
delete(c.subMap, topic)
}
}
@@ -441,26 +528,54 @@ func (c *client) ProcessUnSubscribe(packet *packets.UnsubscribePacket) {
err := c.WriterPacket(unsuback)
if err != nil {
log.Error("send unsuback error, ", err, " clientID = ", c.info.clientID)
log.Error("send unsuback error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
return
}
// //process ubsubscribe message
if c.typ == CLIENT {
b.BroadcastSubOrUnsubMessage(packet)
}
}
func (c *client) unsubscribe(sub *subscription) {
if c.typ == CLIENT {
delete(c.subs, sub.topic)
func (c *client) processClientUnSubscribe(packet *packets.UnsubscribePacket) {
if c.status == Disconnected {
return
}
b := c.broker
if b != nil && sub != nil {
b.sl.Remove(sub)
if b == nil {
return
}
topics := packet.Topics
for _, topic := range topics {
{
//publish kafka
b.Publish(&bridge.Elements{
ClientID: c.info.clientID,
Username: c.info.username,
Action: bridge.Unsubscribe,
Timestamp: time.Now().Unix(),
Topic: topic,
})
}
sub, exist := c.subMap[topic]
if exist {
c.topicsMgr.Unsubscribe([]byte(sub.topic), sub)
c.session.RemoveTopic(topic)
delete(c.subMap, topic)
}
}
unsuback := packets.NewControlPacket(packets.Unsuback).(*packets.UnsubackPacket)
unsuback.MessageID = packet.MessageID
err := c.WriterPacket(unsuback)
if err != nil {
log.Error("send unsuback error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
return
}
// //process ubsubscribe message
b.BroadcastSubOrUnsubMessage(packet)
}
func (c *client) ProcessPing() {
@@ -470,61 +585,80 @@ func (c *client) ProcessPing() {
resp := packets.NewControlPacket(packets.Pingresp).(*packets.PingrespPacket)
err := c.WriterPacket(resp)
if err != nil {
log.Error("send PingResponse error, ", err, " clientID = ", c.info.clientID)
log.Error("send PingResponse error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
return
}
}
func (c *client) Close() {
c.smu.Lock()
if c.status == Disconnected {
c.smu.Unlock()
return
}
//wait for message complete
time.Sleep(1 * time.Second)
c.cancelFunc()
c.status = Disconnected
//wait for message complete
// time.Sleep(1 * time.Second)
// c.status = Disconnected
b := c.broker
b.Publish(&bridge.Elements{
ClientID: c.info.clientID,
Username: c.info.username,
Action: bridge.Disconnect,
Timestamp: time.Now().Unix(),
})
if c.conn != nil {
c.conn.Close()
c.conn = nil
}
c.smu.Unlock()
subs := c.subMap
close(c.closed)
b := c.broker
subs := c.subs
if b != nil {
b.removeClient(c)
for _, sub := range subs {
err := b.sl.Remove(sub)
err := b.topicsMgr.Unsubscribe([]byte(sub.topic), sub)
if err != nil {
log.Error("closed client but remove sublist error, ", err, " clientID = ", c.info.clientID)
log.Error("unsubscribe error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
}
}
if c.typ == CLIENT {
b.BroadcastUnSubscribe(subs)
//offline notification
b.OnlineOfflineNotification(c.info.clientID, false)
}
if c.info.willMsg != nil {
b.PublishMessage(c.info.willMsg)
}
if c.typ == CLUSTER {
b.ConnectToDiscovery()
}
//do reconnect
if c.typ == REMOTE {
localUrl := c.info.localIP + ":" + c.broker.config.Cluster.Port
if c.route.remoteUrl != localUrl {
b.connectRouter(c.route.remoteUrl, "")
}
go b.connectRouter(c.route.remoteID, c.route.remoteUrl)
}
}
}
func (c *client) WriterPacket(packet packets.ControlPacket) error {
if c.status == Disconnected {
return nil
}
if packet == nil {
return nil
}
if c.conn == nil {
c.Close()
return errors.New("connect lost ....")
}
c.mu.Lock()
err := packet.Write(c.conn)

View File

@@ -1,15 +1,15 @@
package broker
import (
"crypto/md5"
"crypto/rand"
"encoding/base64"
"encoding/hex"
"errors"
"io"
"encoding/json"
"reflect"
"strings"
"time"
"github.com/tidwall/gjson"
"go.uber.org/zap"
"github.com/eclipse/paho.mqtt.golang/packets"
uuid "github.com/satori/go.uuid"
)
const (
@@ -46,47 +46,6 @@ const (
QosFailure = 0x80
)
// SubscribeTopicCheckAndSpilt validates an MQTT subscription filter
// and splits it on "/". Rules enforced: "#" may only appear as the
// final character; interior segments must be non-empty and may contain
// "+" only as the whole segment. Empty first/last segments (leading or
// trailing slash) are normalized to "/".
func SubscribeTopicCheckAndSpilt(topic string) ([]string, error) {
	if idx := strings.Index(topic, "#"); idx != -1 && idx != len(topic)-1 {
		return nil, errors.New("Topic format error with index of #")
	}
	segments := strings.Split(topic, "/")
	last := len(segments) - 1
	for i, seg := range segments {
		if i == 0 || i == last {
			// Boundary segments: only normalize the empty string.
			if seg == "" {
				segments[i] = "/"
			}
			continue
		}
		switch {
		case seg == "":
			return nil, errors.New("Topic format error with index of //")
		case strings.Contains(seg, "+") && seg != "+":
			return nil, errors.New("Topic format error with index of +")
		}
	}
	return segments, nil
}
// PublishTopicCheckAndSpilt validates an MQTT publish topic and splits
// it on "/". Wildcards ("#", "+") are forbidden anywhere; interior
// segments must be non-empty; empty first/last segments (leading or
// trailing slash) are normalized to "/".
func PublishTopicCheckAndSpilt(topic string) ([]string, error) {
	if strings.ContainsAny(topic, "#+") {
		return nil, errors.New("Publish Topic format error with + and #")
	}
	segments := strings.Split(topic, "/")
	last := len(segments) - 1
	for i, seg := range segments {
		if seg != "" {
			continue
		}
		if i != 0 && i != last {
			return nil, errors.New("Topic format error with index of //")
		}
		segments[i] = "/"
	}
	return segments, nil
}
func equal(k1, k2 interface{}) bool {
if reflect.TypeOf(k1) != reflect.TypeOf(k2) {
return false
@@ -132,13 +91,65 @@ func equal(k1, k2 interface{}) bool {
return false
}
func GenUniqueId() string {
b := make([]byte, 48)
if _, err := io.ReadFull(rand.Reader, b); err != nil {
return ""
// addSubMap increments the subscriber reference count for topic,
// creating the entry (at 1) when the topic is not yet present.
func addSubMap(m map[string]uint64, topic string) {
	// A missing key reads as the zero value 0, so a single increment
	// covers both the new-entry and existing-entry cases.
	m[topic]++
}
// delSubMap decrements the subscriber reference count for topic and
// returns the remaining count (0 when the last subscriber is removed
// or the topic is unknown).
//
// Fixes two ref-count corruption bugs in the original: the entry was
// left at 1 when the final subscriber unsubscribed (so a later
// addSubMap would jump straight to 2), and an unknown topic inserted a
// spurious zero-valued entry. Return values are unchanged for every
// input, so callers that test "retainNum > 0" behave identically.
func delSubMap(m map[string]uint64, topic string) uint64 {
	subNum, exist := m[topic]
	if !exist {
		// Unknown topic: nothing to remove, and do not pollute the map.
		return 0
	}
	if subNum > 1 {
		m[topic] = subNum - 1
		return subNum - 1
	}
	// Last subscriber gone: drop the entry entirely.
	delete(m, topic)
	return 0
}
// GenUniqueId returns a freshly generated random (version 4) UUID in
// its canonical 36-character string form; used for client and route
// identifiers.
func GenUniqueId() string {
	return uuid.NewV4().String()
}
// wrapPublishPacket returns a copy of packet whose payload is replaced
// by a JSON envelope {"message_id": <uuid>, "payload": <original>}.
// The input packet is not modified.
func wrapPublishPacket(packet *packets.PublishPacket) *packets.PublishPacket {
	clone := packet.Copy()
	// Marshal of a map with only string values cannot fail, so the
	// error is deliberately ignored (as in the original).
	body, _ := json.Marshal(map[string]interface{}{
		"message_id": GenUniqueId(),
		"payload":    string(clone.Payload),
	})
	clone.Payload = body
	return clone
}
// unWrapPublishPacket returns a copy of packet with the JSON envelope
// produced by wrapPublishPacket removed: when the payload contains a
// "payload" field, the copy's payload is replaced by that field's
// string value; otherwise the payload is left as-is.
//
// Fix: the original called gjson.GetBytes twice on the same payload,
// parsing it twice; the result is now fetched once and reused.
func unWrapPublishPacket(packet *packets.PublishPacket) *packets.PublishPacket {
	p := packet.Copy()
	if inner := gjson.GetBytes(p.Payload, "payload"); inner.Exists() {
		p.Payload = []byte(inner.String())
	}
	return p
}
func publish(sub *subscription, packet *packets.PublishPacket) {
// var p *packets.PublishPacket
// if sub.client.info.username != "root" {
// p = unWrapPublishPacket(packet)
// } else {
// p = wrapPublishPacket(packet)
// }
// err := sub.client.WriterPacket(p)
// if err != nil {
// log.Error("process message for psub error, ", zap.Error(err))
// }
err := sub.client.WriterPacket(packet)
if err != nil {
log.Error("process message for psub error, ", zap.Error(err))
}
h := md5.New()
h.Write([]byte(base64.URLEncoding.EncodeToString(b)))
return hex.EncodeToString(h.Sum(nil))
// return GetMd5String()
}

View File

@@ -4,35 +4,41 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"os"
log "github.com/cihub/seelog"
)
const (
CONFIGFILE = "conf/hmq.config"
"github.com/fhmq/hmq/logger"
"go.uber.org/zap"
)
type Config struct {
Worker int `json:"workerNum"`
Host string `json:"host"`
Port string `json:"port"`
Cluster RouteInfo `json:"cluster"`
TlsHost string `json:"tlsHost"`
TlsPort string `json:"tlsPort"`
WsPath string `json:"wsPath"`
WsPort string `json:"wsPort"`
WsTLS bool `json:"wsTLS"`
TlsInfo TLSInfo `json:"tlsInfo"`
Acl bool `json:"acl"`
AclConf string `json:"aclConf"`
Worker int `json:"workerNum"`
HTTPPort string `json:"httpPort"`
Host string `json:"host"`
Port string `json:"port"`
Cluster RouteInfo `json:"cluster"`
Router string `json:"router"`
TlsHost string `json:"tlsHost"`
TlsPort string `json:"tlsPort"`
WsPath string `json:"wsPath"`
WsPort string `json:"wsPort"`
WsTLS bool `json:"wsTLS"`
TlsInfo TLSInfo `json:"tlsInfo"`
Debug bool `json:"debug"`
Plugin Plugins `json:"plugins"`
}
type Plugins struct {
Auth string
Bridge string
}
type RouteInfo struct {
Host string `json:"host"`
Port string `json:"port"`
Routes []string `json:"routes"`
Host string `json:"host"`
Port string `json:"port"`
}
type TLSInfo struct {
@@ -42,11 +48,96 @@ type TLSInfo struct {
KeyFile string `json:"keyFile"`
}
func LoadConfig() (*Config, error) {
var DefaultConfig *Config = &Config{
Worker: 4096,
Host: "0.0.0.0",
Port: "1883",
}
content, err := ioutil.ReadFile(CONFIGFILE)
var (
log = logger.Prod().Named("broker")
)
// showHelp prints the command-line usage text and terminates the
// process with exit status 0.
func showHelp() {
	fmt.Println(usageStr)
	os.Exit(0)
}
// ConfigureConfig builds the broker configuration from command-line
// arguments, optionally replacing it wholesale with a JSON config
// file, then validates the result. It returns (nil, nil) when only
// help was requested, or (nil, err) on parse/load/validation failure.
func ConfigureConfig(args []string) (*Config, error) {
	config := &Config{}
	var (
		help       bool
		configFile string
	)
	fs := flag.NewFlagSet("hmq-broker", flag.ExitOnError)
	fs.Usage = showHelp
	fs.BoolVar(&help, "h", false, "Show this message.")
	fs.BoolVar(&help, "help", false, "Show this message.")
	fs.IntVar(&config.Worker, "w", 1024, "worker num to process message, perfer (client num)/10.")
	fs.IntVar(&config.Worker, "worker", 1024, "worker num to process message, perfer (client num)/10.")
	fs.StringVar(&config.HTTPPort, "httpport", "8080", "Port to listen on.")
	fs.StringVar(&config.HTTPPort, "hp", "8080", "Port to listen on.")
	fs.StringVar(&config.Port, "port", "1883", "Port to listen on.")
	fs.StringVar(&config.Port, "p", "1883", "Port to listen on.")
	fs.StringVar(&config.Host, "host", "0.0.0.0", "Network host to listen on")
	fs.StringVar(&config.Cluster.Port, "cp", "", "Cluster port from which members can connect.")
	fs.StringVar(&config.Cluster.Port, "clusterport", "", "Cluster port from which members can connect.")
	fs.StringVar(&config.Router, "r", "", "Router who maintenance cluster info")
	fs.StringVar(&config.Router, "router", "", "Router who maintenance cluster info")
	fs.StringVar(&config.WsPort, "ws", "", "port for ws to listen on")
	fs.StringVar(&config.WsPort, "wsport", "", "port for ws to listen on")
	fs.StringVar(&config.WsPath, "wsp", "", "path for ws to listen on")
	fs.StringVar(&config.WsPath, "wspath", "", "path for ws to listen on")
	fs.StringVar(&configFile, "config", "", "config file for hmq")
	fs.StringVar(&configFile, "c", "", "config file for hmq")
	fs.BoolVar(&config.Debug, "debug", false, "enable Debug logging.")
	fs.BoolVar(&config.Debug, "d", false, "enable Debug logging.")
	// "-D" defaults to true, so its parsed value alone cannot tell
	// "passed" from "omitted"; the Visit walk below (which reports only
	// flags actually set) is what makes -D turn debug on.
	fs.Bool("D", true, "enable Debug logging.")
	if err := fs.Parse(args); err != nil {
		return nil, err
	}
	if help {
		showHelp()
		return nil, nil
	}
	fs.Visit(func(f *flag.Flag) {
		switch f.Name {
		case "D":
			config.Debug = true
		}
	})
	// A config file, when given, replaces ALL flag-derived settings
	// (including Debug), not just the ones it specifies.
	if configFile != "" {
		tmpConfig, e := LoadConfig(configFile)
		if e != nil {
			return nil, e
		} else {
			config = tmpConfig
		}
	}
	// Swap in the debug-level logger before validation so check() logs
	// at the requested verbosity.
	if config.Debug {
		log = logger.Debug().Named("broker")
	}
	if err := config.check(); err != nil {
		return nil, err
	}
	return config, nil
}
func LoadConfig(filename string) (*Config, error) {
content, err := ioutil.ReadFile(filename)
if err != nil {
log.Error("Read config file error: ", err)
// log.Error("Read config file error: ", zap.Error(err))
return nil, err
}
// log.Info(string(content))
@@ -54,16 +145,19 @@ func LoadConfig() (*Config, error) {
var config Config
err = json.Unmarshal(content, &config)
if err != nil {
log.Error("Unmarshal config file error: ", err)
// log.Error("Unmarshal config file error: ", zap.Error(err))
return nil, err
}
return &config, nil
}
func (config *Config) check() error {
if config.Worker == 0 {
config.Worker = 1024
}
WorkNum = config.Worker
if config.Port != "" {
if config.Host == "" {
config.Host = "0.0.0.0"
@@ -75,29 +169,33 @@ func LoadConfig() (*Config, error) {
config.Cluster.Host = "0.0.0.0"
}
}
if config.Router != "" {
if config.Cluster.Port == "" {
return errors.New("cluster port is null")
}
}
if config.TlsPort != "" {
if config.TlsInfo.CertFile == "" || config.TlsInfo.KeyFile == "" {
log.Error("tls config error, no cert or key file.")
return nil, err
return errors.New("tls config error, no cert or key file.")
}
if config.TlsHost == "" {
config.TlsHost = "0.0.0.0"
}
}
return &config, nil
return nil
}
func NewTLSConfig(tlsInfo TLSInfo) (*tls.Config, error) {
cert, err := tls.LoadX509KeyPair(tlsInfo.CertFile, tlsInfo.KeyFile)
if err != nil {
return nil, fmt.Errorf("error parsing X509 certificate/key pair: %v", err)
return nil, fmt.Errorf("error parsing X509 certificate/key pair: %v", zap.Error(err))
}
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
if err != nil {
return nil, fmt.Errorf("error parsing certificate: %v", err)
return nil, fmt.Errorf("error parsing certificate: %v", zap.Error(err))
}
// Create TLSConfig

View File

@@ -1,44 +0,0 @@
package broker
// WorkNum is the number of message-processing workers to start; it is
// populated from Config.Worker during configuration checking.
var WorkNum int

// Dispatcher fans incoming messages out to a pool of workers. Each worker
// registers its private message channel on WorkerPool; the dispatcher hands
// each message to whichever worker channel becomes available next.
type Dispatcher struct {
	WorkerPool chan chan *Message
}
// StartDispatcher initializes the shared message pools, then builds a new
// dispatcher and launches it together with its worker goroutines.
func StartDispatcher() {
	InitMessagePool()
	NewDispatcher().Run()
}
// Run starts WorkNum workers listening on the shared worker pool, then
// launches the dispatch loop in its own goroutine.
func (d *Dispatcher) Run() {
	for n := 0; n < WorkNum; n++ {
		NewWorker(d.WorkerPool).Start()
	}
	go d.dispatch()
}
// NewDispatcher returns a Dispatcher whose worker pool can hold up to
// WorkNum worker channels.
func NewDispatcher() *Dispatcher {
	return &Dispatcher{WorkerPool: make(chan chan *Message, WorkNum)}
}
// dispatch drains every message-pool queue. One goroutine per queue
// receives messages; for each message it grabs the next free worker
// channel from the pool and forwards the message to it.
func (d *Dispatcher) dispatch() {
	for i := 0; i < (MessagePoolNum + 2); i++ {
		go func(idx int) {
			for {
				// A for/select with a single receive case is equivalent to
				// a plain channel receive (staticcheck S1000).
				msg := <-MSGPool[idx].queue
				go func(m *Message) {
					msgChannel := <-d.WorkerPool
					msgChannel <- m
				}(msg)
			}
		}(i)
	}
}

26
broker/http.go Normal file
View File

@@ -0,0 +1,26 @@
package broker
import (
"github.com/gin-gonic/gin"
)
// InitHTTPMoniter starts the HTTP management API for the broker. It exposes
// DELETE /api/v1/connections/:clientid, which force-closes the named client
// connection, and blocks serving on the configured HTTP port.
func InitHTTPMoniter(b *Broker) {
	gin.SetMode(gin.ReleaseMode)
	router := gin.Default()
	// gin requires route paths to begin with '/'; registering the bare
	// "api/v1/..." form makes gin panic at startup.
	router.DELETE("/api/v1/connections/:clientid", func(c *gin.Context) {
		clientid := c.Param("clientid")
		cli, ok := b.clients.Load(clientid)
		if ok {
			conn, success := cli.(*client)
			if success {
				conn.Close()
			}
		}
		resp := map[string]int{
			"code": 0,
		}
		c.JSON(200, &resp)
	})

	// Run blocks until the listener fails; surface the error instead of
	// silently dropping it.
	if err := router.Run(":" + b.config.HTTPPort); err != nil {
		log.Error("start http monitor error: ", err)
	}
}

View File

@@ -4,10 +4,9 @@ import (
"fmt"
"time"
"github.com/eclipse/paho.mqtt.golang/packets"
simplejson "github.com/bitly/go-simplejson"
log "github.com/cihub/seelog"
"github.com/eclipse/paho.mqtt.golang/packets"
"go.uber.org/zap"
)
func (c *client) SendInfo() {
@@ -19,26 +18,24 @@ func (c *client) SendInfo() {
infoMsg := NewInfo(c.broker.id, url, false)
err := c.WriterPacket(infoMsg)
if err != nil {
log.Error("send info message error, ", err)
log.Error("send info message error, ", zap.Error(err))
return
}
}
func (c *client) StartPing() {
timeTicker := time.NewTicker(time.Second * 30)
timeTicker := time.NewTicker(time.Second * 50)
ping := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket)
for {
select {
case <-timeTicker.C:
err := c.WriterPacket(ping)
if err != nil {
log.Error("ping error: ", err)
log.Error("ping error: ", zap.Error(err))
c.Close()
}
case _, ok := <-c.closed:
if !ok {
return
}
case <-c.ctx.Done():
return
}
}
}
@@ -55,7 +52,7 @@ func (c *client) SendConnect() {
m.Keepalive = uint16(60)
err := c.WriterPacket(m)
if err != nil {
log.Error("send connect message error, ", err)
log.Error("send connect message error, ", zap.Error(err))
return
}
log.Info("send connect success")
@@ -66,7 +63,7 @@ func NewInfo(sid, url string, isforword bool) *packets.PublishPacket {
pub.Qos = 0
pub.TopicName = BrokerInfoTopic
pub.Retain = false
info := fmt.Sprintf(`{"remoteID":"%s","url":"%s","isForward":%t}`, sid, url, isforword)
info := fmt.Sprintf(`{"brokerID":"%s","brokerUrl":"%s"}`, sid, url)
// log.Info("new info", string(info))
pub.Payload = []byte(info)
return pub
@@ -79,51 +76,36 @@ func (c *client) ProcessInfo(packet *packets.PublishPacket) {
return
}
log.Info("recv remoteInfo: ", string(packet.Payload))
log.Info("recv remoteInfo: ", zap.String("payload", string(packet.Payload)))
js, e := simplejson.NewJson(packet.Payload)
if e != nil {
log.Warn("parse info message err", e)
js, err := simplejson.NewJson(packet.Payload)
if err != nil {
log.Warn("parse info message err", zap.Error(err))
return
}
rid := js.Get("remoteID").MustString()
rurl := js.Get("url").MustString()
isForward := js.Get("isForward").MustBool()
if rid == "" {
log.Error("receive info message error with remoteID is null")
routes, err := js.Get("data").Map()
if routes == nil {
log.Error("receive info message error, ", zap.Error(err))
return
}
if rid == b.id {
if !isForward {
c.Close() //close connet self
}
return
}
b.nodes = routes
b.mu.Lock()
exist := b.CheckRemoteExist(rid, rurl)
if !exist {
b.connectRouter(rurl, rid)
}
b.mu.Unlock()
if !isForward {
if c.typ == ROUTER {
route := route{
remoteUrl: rurl,
remoteID: rid,
}
c.route = route
for rid, rurl := range routes {
if rid == b.id {
continue
}
go b.SendLocalSubsToRouter(c)
// log.Info("BroadcastInfoMessage starting... ")
infoMsg := NewInfo(rid, rurl, true)
b.BroadcastInfoMessage(rid, infoMsg)
}
url, ok := rurl.(string)
if ok {
exist := b.CheckRemoteExist(rid, url)
if !exist {
b.connectRouter(rid, url)
}
}
return
}
b.mu.Unlock()
}

View File

@@ -0,0 +1,62 @@
package sessions
import (
"fmt"
"sync"
)
// Compile-time check that memProvider satisfies SessionsProvider.
var _ SessionsProvider = (*memProvider)(nil)

// Register the in-memory provider under the name "mem" so it can be
// selected via NewManager("mem").
func init() {
	Register("mem", NewMemProvider())
}

// memProvider is an in-memory SessionsProvider: sessions live in a map
// guarded by a read/write mutex and are lost on restart.
type memProvider struct {
	st map[string]*Session // session store keyed by session id
	mu sync.RWMutex        // guards st
}
// NewMemProvider returns an empty in-memory session provider.
func NewMemProvider() *memProvider {
	p := new(memProvider)
	p.st = make(map[string]*Session)
	return p
}
// New creates (or replaces) the session stored under id and returns it.
func (p *memProvider) New(id string) (*Session, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	sess := &Session{id: id}
	p.st[id] = sess
	return sess, nil
}
// Get looks up the session stored under id; it returns an error when no
// such session exists.
func (p *memProvider) Get(id string) (*Session, error) {
	p.mu.RLock()
	defer p.mu.RUnlock()
	if sess, ok := p.st[id]; ok {
		return sess, nil
	}
	return nil, fmt.Errorf("store/Get: No session found for key %s", id)
}
// Del removes the session stored under id, if any.
func (p *memProvider) Del(id string) {
	p.mu.Lock()
	delete(p.st, id)
	p.mu.Unlock()
}
// Save is a no-op for the in-memory provider: sessions are mutated in
// place, so there is nothing to persist.
func (this *memProvider) Save(id string) error {
	return nil
}
// Count reports the number of stored sessions.
//
// It takes the read lock: every other method mutates st under mu, so an
// unguarded len() here would be a data race under concurrent use.
func (this *memProvider) Count() int {
	this.mu.RLock()
	defer this.mu.RUnlock()
	return len(this.st)
}
// Close drops every stored session by replacing the store with a fresh map.
//
// Guarded by mu for the same reason as Count: concurrent New/Get/Del calls
// touch the same map.
func (this *memProvider) Close() error {
	this.mu.Lock()
	defer this.mu.Unlock()
	this.st = make(map[string]*Session)
	return nil
}

View File

@@ -0,0 +1,149 @@
package sessions
import (
"fmt"
"sync"
"github.com/eclipse/paho.mqtt.golang/packets"
)
const (
	// defaultQueueSize is the queue size for the ack queue.
	defaultQueueSize = 16
)

// Session holds per-client MQTT session state: the CONNECT packet the
// client supplied, its optional will and retained messages, and the set of
// topics it has subscribed to. All access is serialized through mu.
type Session struct {
	// cmsg is the CONNECT message
	cmsg *packets.ConnectPacket
	// Will message to publish if connect is closed unexpectedly
	Will *packets.PublishPacket
	// Retained publish message
	Retained *packets.PublishPacket
	// topics stores all the topics (QoS keyed by topic name) for this session/client
	topics map[string]byte
	// initted reports whether Init has been run for this session
	initted bool
	// Serialize access to this session
	mu sync.Mutex
	// id is the client identifier this session was created under
	id string
}
// Init binds the CONNECT packet to the session, captures the will message
// (when the will flag is set) and prepares the subscription map. It may be
// called only once; subsequent calls return an error.
func (this *Session) Init(msg *packets.ConnectPacket) error {
	this.mu.Lock()
	defer this.mu.Unlock()

	if this.initted {
		return fmt.Errorf("Session already initialized")
	}

	this.cmsg = msg

	if this.cmsg.WillFlag {
		this.Will = packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
		this.Will.Qos = this.cmsg.Qos
		this.Will.TopicName = this.cmsg.WillTopic
		this.Will.Payload = this.cmsg.WillMessage
		this.Will.Retain = this.cmsg.WillRetain
	}

	this.topics = make(map[string]byte, 1)

	// ClientIdentifier is already a string; the previous string(...)
	// conversion was redundant (staticcheck S1003-style cleanup).
	this.id = msg.ClientIdentifier

	this.initted = true

	return nil
}
// Update replaces the session's stored CONNECT packet with msg.
func (s *Session) Update(msg *packets.ConnectPacket) error {
	s.mu.Lock()
	s.cmsg = msg
	s.mu.Unlock()
	return nil
}
// RetainMessage records msg as this session's retained publish message.
func (s *Session) RetainMessage(msg *packets.PublishPacket) error {
	s.mu.Lock()
	s.Retained = msg
	s.mu.Unlock()
	return nil
}
// AddTopic records a subscription to topic at the given QoS. The session
// must have been initialized first.
func (s *Session) AddTopic(topic string, qos byte) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.initted {
		s.topics[topic] = qos
		return nil
	}
	return fmt.Errorf("Session not yet initialized")
}
// RemoveTopic drops the subscription entry for topic, if present. The
// session must have been initialized first.
func (s *Session) RemoveTopic(topic string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.initted {
		delete(s.topics, topic)
		return nil
	}
	return fmt.Errorf("Session not yet initialized")
}
// Topics returns parallel slices of subscribed topic names and their QoS
// levels. Ordering is unspecified (map iteration order). The session must
// have been initialized first.
func (this *Session) Topics() ([]string, []byte, error) {
	this.mu.Lock()
	defer this.mu.Unlock()

	if !this.initted {
		return nil, nil, fmt.Errorf("Session not yet initialized")
	}

	// Pre-size both slices: the final length is known, so repeated append
	// growth is avoidable.
	topics := make([]string, 0, len(this.topics))
	qoss := make([]byte, 0, len(this.topics))
	for k, v := range this.topics {
		topics = append(topics, k)
		qoss = append(qoss, v)
	}

	return topics, qoss, nil
}
// ID returns the client identifier this session was created under.
//
// It reads the id field captured at Init time instead of dereferencing
// cmsg, so it does not panic when called before a CONNECT packet has been
// attached (cmsg is nil until Init/Update runs).
func (this *Session) ID() string {
	return this.id
}
// WillFlag reports whether the stored CONNECT packet has the will flag set.
func (s *Session) WillFlag() bool {
	s.mu.Lock()
	flag := s.cmsg.WillFlag
	s.mu.Unlock()
	return flag
}
// SetWillFlag overwrites the will flag on the stored CONNECT packet.
func (s *Session) SetWillFlag(v bool) {
	s.mu.Lock()
	s.cmsg.WillFlag = v
	s.mu.Unlock()
}
// CleanSession reports whether the stored CONNECT packet requested a clean
// session.
func (s *Session) CleanSession() bool {
	s.mu.Lock()
	clean := s.cmsg.CleanSession
	s.mu.Unlock()
	return clean
}

View File

@@ -0,0 +1,92 @@
package sessions
import (
"crypto/rand"
"encoding/base64"
"errors"
"fmt"
"io"
)
var (
	// ErrSessionsProviderNotFound is returned when the requested provider
	// name has not been registered.
	ErrSessionsProviderNotFound = errors.New("Session: Session provider not found")

	// ErrKeyNotAvailable is returned when no item exists for a key.
	// Message fixed: "not item found" typo and trailing period removed
	// (Go error strings carry no terminal punctuation).
	ErrKeyNotAvailable = errors.New("Session: no item found for key")

	// providers is the global registry populated via Register.
	providers = make(map[string]SessionsProvider)
)
// SessionsProvider is the storage backend contract for MQTT sessions:
// create, look up, delete, persist and count sessions, plus a Close hook
// that releases all of them.
type SessionsProvider interface {
	New(id string) (*Session, error)
	Get(id string) (*Session, error)
	Del(id string)
	Save(id string) error
	Count() int
	Close() error
}
// Register makes a session provider available by the provided name.
// If a Register is called twice with the same name or if the driver is nil,
// it panics.
func Register(name string, provider SessionsProvider) {
	if provider == nil {
		panic("session: Register provide is nil")
	}
	_, exists := providers[name]
	if exists {
		panic("session: Register called twice for provider " + name)
	}
	providers[name] = provider
}
// Unregister removes the provider registered under name, if any.
func Unregister(name string) {
	delete(providers, name)
}
// Manager wraps a named SessionsProvider behind a uniform API.
type Manager struct {
	p SessionsProvider
}

// NewManager returns a Manager backed by the provider registered under
// providerName, or an error when no such provider exists.
func NewManager(providerName string) (*Manager, error) {
	provider, ok := providers[providerName]
	if !ok {
		return nil, fmt.Errorf("session: unknown provider %q", providerName)
	}
	return &Manager{p: provider}, nil
}
// New creates a session under id, generating a random identifier when id
// is empty.
func (m *Manager) New(id string) (*Session, error) {
	if id == "" {
		id = m.sessionId()
	}
	return m.p.New(id)
}
// Get returns the session stored under id via the underlying provider.
func (this *Manager) Get(id string) (*Session, error) {
	return this.p.Get(id)
}

// Del removes the session stored under id.
func (this *Manager) Del(id string) {
	this.p.Del(id)
}

// Save asks the provider to persist the session stored under id.
func (this *Manager) Save(id string) error {
	return this.p.Save(id)
}

// Count reports how many sessions the provider currently holds.
func (this *Manager) Count() int {
	return this.p.Count()
}

// Close releases all sessions held by the provider.
func (this *Manager) Close() error {
	return this.p.Close()
}
// sessionId generates a random, URL-safe session identifier from 15 bytes
// of crypto/rand entropy. It returns "" if the entropy source fails.
func (manager *Manager) sessionId() string {
	b := make([]byte, 15)
	if _, err := io.ReadFull(rand.Reader, b); err != nil {
		return ""
	}
	return base64.URLEncoding.EncodeToString(b)
}

View File

@@ -0,0 +1,550 @@
package topics
import (
"fmt"
"reflect"
"sync"
"github.com/eclipse/paho.mqtt.golang/packets"
)
// MQTT QoS levels, plus the failure code used when a subscription is
// rejected.
const (
	QosAtMostOnce byte = iota
	QosAtLeastOnce
	QosExactlyOnce
	// QosFailure is the return code signalling a rejected subscription.
	QosFailure = 0x80
)
// Compile-time check that memTopics satisfies TopicsProvider.
var _ TopicsProvider = (*memTopics)(nil)

// memTopics keeps the subscription tree and the retained-message tree in
// memory, each guarded by its own read/write mutex.
type memTopics struct {
	// Sub/unsub mutex
	smu sync.RWMutex
	// Subscription tree
	sroot *snode

	// Retained message mutex
	rmu sync.RWMutex
	// Retained messages topic tree
	rroot *rnode
}

// Register the in-memory provider under "mem" so it can be selected via
// NewManager("mem").
func init() {
	Register("mem", NewMemProvider())
}
// NewMemProvider builds an in-memory TopicsProvider with empty subscription
// and retained-message trees. Nothing is persisted: when the server goes
// down, all subscriptions and retained messages are gone. Use with care.
func NewMemProvider() *memTopics {
	m := new(memTopics)
	m.sroot = newSNode()
	m.rroot = newRNode()
	return m
}
// ValidQos reports whether qos is one of the three MQTT QoS levels (0-2).
func ValidQos(qos byte) bool {
	switch qos {
	case QosAtMostOnce, QosAtLeastOnce, QosExactlyOnce:
		return true
	}
	return false
}
// Subscribe adds sub to the subscription tree under topic and returns the
// granted QoS. A bad QoS or a nil subscriber yields QosFailure.
func (this *memTopics) Subscribe(topic []byte, qos byte, sub interface{}) (byte, error) {
	if !ValidQos(qos) {
		return QosFailure, fmt.Errorf("Invalid QoS %d", qos)
	}
	if sub == nil {
		return QosFailure, fmt.Errorf("Subscriber cannot be nil")
	}

	this.smu.Lock()
	defer this.smu.Unlock()

	// ValidQos above already guarantees qos <= QosExactlyOnce, so the old
	// clamp (qos = QosExactlyOnce when qos > 2) was unreachable dead code.
	if err := this.sroot.sinsert(topic, qos, sub); err != nil {
		return QosFailure, err
	}

	return qos, nil
}
// Unsubscribe removes sub's subscription on topic from the tree.
func (m *memTopics) Unsubscribe(topic []byte, sub interface{}) error {
	m.smu.Lock()
	defer m.smu.Unlock()

	return m.sroot.sremove(topic, sub)
}
// Subscribers collects every subscriber matching topic into subs, along
// with the effective QoS for each in qoss. Both output slices are reset
// first, so values returned by a previous call are invalidated by the next
// Subscribers call.
func (m *memTopics) Subscribers(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error {
	if !ValidQos(qos) {
		return fmt.Errorf("Invalid QoS %d", qos)
	}

	m.smu.RLock()
	defer m.smu.RUnlock()

	*subs = (*subs)[:0]
	*qoss = (*qoss)[:0]

	return m.sroot.smatch(topic, qos, subs, qoss)
}
// Retain stores msg as the retained message for its topic. Per the MQTT
// conformance/interoperability testing rules
// (https://eclipse.org/paho/clients/testing/), an empty payload means
// "delete the retained message" instead.
func (m *memTopics) Retain(msg *packets.PublishPacket) error {
	m.rmu.Lock()
	defer m.rmu.Unlock()

	topic := []byte(msg.TopicName)
	if len(msg.Payload) == 0 {
		return m.rroot.rremove(topic)
	}
	return m.rroot.rinsertOrUpdate(topic, msg)
}
// Retained appends every retained message whose topic matches the
// (possibly wildcarded) topic filter to msgs.
func (m *memTopics) Retained(topic []byte, msgs *[]*packets.PublishPacket) error {
	m.rmu.RLock()
	defer m.rmu.RUnlock()

	return m.rroot.rmatch(topic, msgs)
}
// Close discards both trees; the provider must not be used afterwards.
func (m *memTopics) Close() error {
	m.sroot, m.rroot = nil, nil
	return nil
}
// Subscription tree nodes.

// snode is one level of the subscription tree: terminal subscribers plus
// children keyed by the next topic level.
type snode struct {
	// If this is the end of the topic string, then add subscribers here
	subs []interface{}
	qos  []byte

	// Otherwise add the next topic level here
	snodes map[string]*snode
}

// newSNode returns an empty subscription tree node.
func newSNode() *snode {
	return &snode{
		snodes: make(map[string]*snode),
	}
}
// sinsert walks the topic levels, creating intermediate snodes as needed,
// and records sub (with its QoS) at the node for the final level. An
// existing subscription by the same subscriber only has its QoS updated.
func (this *snode) sinsert(topic []byte, qos byte, sub interface{}) error {
	// If there's no more topic levels, that means we are at the matching snode
	// to insert the subscriber. So let's see if there's such subscriber,
	// if so, update it. Otherwise insert it.
	if len(topic) == 0 {
		// Let's see if the subscriber is already on the list. If yes, update
		// QoS and then return.
		for i := range this.subs {
			if equal(this.subs[i], sub) {
				this.qos[i] = qos
				return nil
			}
		}

		// Otherwise add.
		this.subs = append(this.subs, sub)
		this.qos = append(this.qos, qos)

		return nil
	}

	// Not the last level, so let's find or create the next level snode, and
	// recursively call it's insert().

	// ntl = next topic level
	ntl, rem, err := nextTopicLevel(topic)
	if err != nil {
		return err
	}

	level := string(ntl)

	// Add snode if it doesn't already exist
	n, ok := this.snodes[level]
	if !ok {
		n = newSNode()
		this.snodes[level] = n
	}

	return n.sinsert(rem, qos, sub)
}
// sremove deletes sub's subscription under topic, pruning tree levels that
// become empty. A nil sub removes ALL subscribers at the final level.
//
// This remove implementation ignores the QoS, as long as the subscriber
// matches then it's removed.
func (this *snode) sremove(topic []byte, sub interface{}) error {
	// If the topic is empty, it means we are at the final matching snode. If so,
	// let's find the matching subscribers and remove them.
	if len(topic) == 0 {
		// If subscriber == nil, then it's signal to remove ALL subscribers
		if sub == nil {
			this.subs = this.subs[0:0]
			this.qos = this.qos[0:0]
			return nil
		}

		// If we find the subscriber then remove it from the list. Technically
		// we just overwrite the slot by shifting all other items up by one.
		for i := range this.subs {
			if equal(this.subs[i], sub) {
				this.subs = append(this.subs[:i], this.subs[i+1:]...)
				this.qos = append(this.qos[:i], this.qos[i+1:]...)
				return nil
			}
		}

		return fmt.Errorf("No topic found for subscriber")
	}

	// Not the last level, so let's find the next level snode, and recursively
	// call it's remove().

	// ntl = next topic level
	ntl, rem, err := nextTopicLevel(topic)
	if err != nil {
		return err
	}

	level := string(ntl)

	// Find the snode that matches the topic level
	n, ok := this.snodes[level]
	if !ok {
		return fmt.Errorf("No topic found")
	}

	// Remove the subscriber from the next level snode
	if err := n.sremove(rem, sub); err != nil {
		return err
	}

	// If there are no more subscribers and snodes to the next level we just visited
	// let's remove it
	if len(n.subs) == 0 && len(n.snodes) == 0 {
		delete(this.snodes, level)
	}

	return nil
}
// smatch() returns all the subscribers that are subscribed to the topic. Given a topic
// with no wildcards (publish topic), it returns a list of subscribers that subscribes
// to the topic. For each of the level names, it's a match
// - if there are subscribers to '#', then all the subscribers are added to result set
func (this *snode) smatch(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error {
	// If the topic is empty, it means we are at the final matching snode. If so,
	// let's find the subscribers that match the qos and append them to the list.
	// A "#" child also matches the parent level itself.
	if len(topic) == 0 {
		this.matchQos(qos, subs, qoss)
		// staticcheck S1005: the comma-ok form with a blank identifier is
		// redundant for a plain map lookup.
		if mwcn := this.snodes[MWC]; mwcn != nil {
			mwcn.matchQos(qos, subs, qoss)
		}
		return nil
	}

	// ntl = next topic level
	ntl, rem, err := nextTopicLevel(topic)
	if err != nil {
		return err
	}

	level := string(ntl)

	for k, n := range this.snodes {
		// If the key is "#", then these subscribers are added to the result set;
		// "+" matches any single level, so recursion continues on the remainder.
		if k == MWC {
			n.matchQos(qos, subs, qoss)
		} else if k == SWC || k == level {
			if err := n.smatch(rem, qos, subs, qoss); err != nil {
				return err
			}
		}
	}

	return nil
}
// Retained message tree nodes.

// rnode is one level of the retained-message tree.
type rnode struct {
	// If this is the end of the topic string, then add retained messages here
	msg *packets.PublishPacket
	// Otherwise add the next topic level here
	rnodes map[string]*rnode
}

// newRNode returns an empty retained-message tree node.
func newRNode() *rnode {
	return &rnode{
		rnodes: make(map[string]*rnode),
	}
}
// rinsertOrUpdate stores msg as the retained message for topic, creating
// intermediate tree nodes as needed and replacing any previous message.
func (this *rnode) rinsertOrUpdate(topic []byte, msg *packets.PublishPacket) error {
	// If there's no more topic levels, that means we are at the matching rnode.
	if len(topic) == 0 {
		// Reuse the message if possible
		this.msg = msg

		return nil
	}

	// Not the last level, so let's find or create the next level snode, and
	// recursively call it's insert().

	// ntl = next topic level
	ntl, rem, err := nextTopicLevel(topic)
	if err != nil {
		return err
	}

	level := string(ntl)

	// Add snode if it doesn't already exist
	n, ok := this.rnodes[level]
	if !ok {
		n = newRNode()
		this.rnodes[level] = n
	}

	return n.rinsertOrUpdate(rem, msg)
}
// rremove deletes the retained message for the supplied topic, pruning tree
// levels that become empty on the way back up.
func (this *rnode) rremove(topic []byte) error {
	// If the topic is empty, it means we are at the final matching rnode. If so,
	// let's remove the buffer and message.
	if len(topic) == 0 {
		this.msg = nil
		return nil
	}

	// Not the last level, so let's find the next level rnode, and recursively
	// call it's remove().

	// ntl = next topic level
	ntl, rem, err := nextTopicLevel(topic)
	if err != nil {
		return err
	}

	level := string(ntl)

	// Find the rnode that matches the topic level
	n, ok := this.rnodes[level]
	if !ok {
		return fmt.Errorf("No topic found")
	}

	// Remove the subscriber from the next level rnode
	if err := n.rremove(rem); err != nil {
		return err
	}

	// If there are no more rnodes to the next level we just visited let's remove it
	if len(n.rnodes) == 0 {
		delete(this.rnodes, level)
	}

	return nil
}
// rmatch() finds the retained messages for the topic and qos provided. It's somewhat
// of a reverse match compare to match() since the supplied topic can contain
// wildcards, whereas the retained message topic is a full (no wildcard) topic.
func (this *rnode) rmatch(topic []byte, msgs *[]*packets.PublishPacket) error {
	// If the topic is empty, it means we are at the final matching rnode. If so,
	// add the retained msg to the list.
	if len(topic) == 0 {
		if this.msg != nil {
			*msgs = append(*msgs, this.msg)
		}
		return nil
	}

	// ntl = next topic level
	ntl, rem, err := nextTopicLevel(topic)
	if err != nil {
		return err
	}

	level := string(ntl)

	if level == MWC {
		// If '#', add all retained messages starting this node
		this.allRetained(msgs)
	} else if level == SWC {
		// If '+', check all nodes at this level. Next levels must be matched.
		for _, n := range this.rnodes {
			if err := n.rmatch(rem, msgs); err != nil {
				return err
			}
		}
	} else {
		// Otherwise, find the matching node, go to the next level
		if n, ok := this.rnodes[level]; ok {
			if err := n.rmatch(rem, msgs); err != nil {
				return err
			}
		}
	}

	return nil
}
// allRetained appends this node's retained message (if any) and those of
// every descendant node to msgs.
func (r *rnode) allRetained(msgs *[]*packets.PublishPacket) {
	if r.msg != nil {
		*msgs = append(*msgs, r.msg)
	}
	for _, child := range r.rnodes {
		child.allRetained(msgs)
	}
}
// Parser states used by nextTopicLevel.
const (
	stateCHR byte = iota // Regular character
	stateMWC             // Multi-level wildcard
	stateSWC             // Single-level wildcard
	stateSEP             // Topic level separator
	stateSYS             // System level topic ($)
)
// nextTopicLevel splits off the first level of an MQTT topic.
// Returns topic level, remaining topic levels and any errors. Wildcard
// placement is validated: '#' and '+' must each occupy an entire level,
// and '#' must be the last level.
func nextTopicLevel(topic []byte) ([]byte, []byte, error) {
	s := stateCHR

	for i, c := range topic {
		switch c {
		case '/':
			if s == stateMWC {
				return nil, nil, fmt.Errorf("Multi-level wildcard found in topic and it's not at the last level")
			}

			// A leading '/' denotes an empty first level, reported as "+".
			if i == 0 {
				return []byte(SWC), topic[i+1:], nil
			}

			return topic[:i], topic[i+1:], nil

		case '#':
			if i != 0 {
				return nil, nil, fmt.Errorf("Wildcard character '#' must occupy entire topic level")
			}

			s = stateMWC

		case '+':
			if i != 0 {
				return nil, nil, fmt.Errorf("Wildcard character '+' must occupy entire topic level")
			}

			s = stateSWC

		// case '$':
		// 	if i == 0 {
		// 		return nil, nil, fmt.Errorf("Cannot publish to $ topics")
		// 	}

		// 	s = stateSYS

		default:
			if s == stateMWC || s == stateSWC {
				return nil, nil, fmt.Errorf("Wildcard characters '#' and '+' must occupy entire topic level")
			}

			s = stateCHR
		}
	}

	// If we got here that means we didn't hit the separator along the way, so the
	// topic is either empty, or does not contain a separator. Either way, we return
	// the full topic
	return topic, nil, nil
}
// The QoS of the payload messages sent in response to a subscription must be the
// minimum of the QoS of the originally published message (in this case, it's the
// qos parameter) and the maximum QoS granted by the server (in this case, it's
// the QoS in the topic tree).
//
// It's also possible that even if the topic matches, the subscriber is not included
// due to the QoS granted is lower than the published message QoS. For example,
// if the client is granted only QoS 0, and the publish message is QoS 1, then this
// client is not to be send the published message.
func (this *snode) matchQos(qos byte, subs *[]interface{}, qoss *[]byte) {
	for _, sub := range this.subs {
		// NOTE(review): the QoS filter described above is commented out, so
		// every subscriber at this node is returned with the published QoS
		// regardless of its granted QoS — confirm this is intentional.
		// If the published QoS is higher than the subscriber QoS, then we skip the
		// subscriber. Otherwise, add to the list.
		// if qos >= this.qos[i] {
		*subs = append(*subs, sub)
		*qoss = append(*qoss, qos)
		// }
	}
}
// equal reports whether two interface values are equal. Values of different
// dynamic types are never equal. Function values are compared by their code
// pointers (Go func values are not comparable with ==), and non-comparable
// dynamic types (slices, maps, ...) compare as unequal instead of panicking.
func equal(k1, k2 interface{}) bool {
	if reflect.TypeOf(k1) != reflect.TypeOf(k2) {
		return false
	}

	if reflect.ValueOf(k1).Kind() == reflect.Func {
		// The previous implementation compared &k1 == &k2, i.e. the
		// addresses of the two local parameters — that is always false, so
		// func subscribers could never match. Compare the underlying code
		// pointers instead.
		return reflect.ValueOf(k1).Pointer() == reflect.ValueOf(k2).Pointer()
	}

	if !reflect.TypeOf(k1).Comparable() {
		// Using == on a non-comparable dynamic type would panic at runtime.
		return false
	}

	// Types match and are comparable: == fully decides equality, which
	// subsumes the old per-type switch.
	return k1 == k2
}

View File

@@ -0,0 +1,91 @@
package topics
import (
"fmt"
"github.com/eclipse/paho.mqtt.golang/packets"
)
// Topic syntax characters.
const (
	// MWC is the multi-level wildcard
	MWC = "#"

	// SWC is the single level wildcard
	SWC = "+"

	// SEP is the topic level separator
	SEP = "/"

	// SYS is the starting character of the system level topics
	SYS = "$"

	// Both wildcards
	_WC = "#+"
)
var (
	// providers is the global registry of topic providers, populated via
	// Register.
	providers = make(map[string]TopicsProvider)
)

// TopicsProvider is the storage backend contract for subscriptions and
// retained messages.
type TopicsProvider interface {
	Subscribe(topic []byte, qos byte, subscriber interface{}) (byte, error)
	Unsubscribe(topic []byte, subscriber interface{}) error
	Subscribers(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error
	Retain(msg *packets.PublishPacket) error
	Retained(topic []byte, msgs *[]*packets.PublishPacket) error
	Close() error
}
// Register makes a topics provider available under name. It panics when
// provider is nil or name was already registered.
func Register(name string, provider TopicsProvider) {
	if provider == nil {
		panic("topics: Register provide is nil")
	}
	_, exists := providers[name]
	if exists {
		panic("topics: Register called twice for provider " + name)
	}
	providers[name] = provider
}
// Unregister removes the topics provider registered under name, if any.
func Unregister(name string) {
	delete(providers, name)
}
// Manager wraps a named TopicsProvider behind a uniform API.
type Manager struct {
	p TopicsProvider
}

// NewManager returns a Manager backed by the topics provider registered
// under providerName, or an error when no such provider exists.
func NewManager(providerName string) (*Manager, error) {
	p, ok := providers[providerName]
	if !ok {
		// The message previously said "session: ..." — a copy-paste from
		// the sessions package; this lookup is in the topics registry.
		return nil, fmt.Errorf("topics: unknown provider %q", providerName)
	}
	return &Manager{p: p}, nil
}
// Subscribe delegates to the underlying provider.
func (this *Manager) Subscribe(topic []byte, qos byte, subscriber interface{}) (byte, error) {
	return this.p.Subscribe(topic, qos, subscriber)
}

// Unsubscribe delegates to the underlying provider.
func (this *Manager) Unsubscribe(topic []byte, subscriber interface{}) error {
	return this.p.Unsubscribe(topic, subscriber)
}

// Subscribers delegates to the underlying provider.
func (this *Manager) Subscribers(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error {
	return this.p.Subscribers(topic, qos, subs, qoss)
}

// Retain delegates to the underlying provider.
func (this *Manager) Retain(msg *packets.PublishPacket) error {
	return this.p.Retain(msg)
}

// Retained delegates to the underlying provider.
func (this *Manager) Retained(topic []byte, msgs *[]*packets.PublishPacket) error {
	return this.p.Retained(topic, msgs)
}

// Close delegates to the underlying provider.
func (this *Manager) Close() error {
	return this.p.Close()
}

View File

@@ -1,62 +0,0 @@
package broker
import (
"sync"
"github.com/eclipse/paho.mqtt.golang/packets"
)
// Sizing constants for the shared message pools.
const (
	MaxUser               = 1024 * 1024
	MessagePoolNum        = 1024
	MessagePoolUser       = MaxUser / MessagePoolNum
	MessagePoolMessageNum = MaxUser / MessagePoolNum * 4
)

// Message pairs an inbound control packet with the client it arrived on.
type Message struct {
	client *client
	packet packets.ControlPacket
}

// MSGPool holds MessagePoolNum+2 pools; it is populated by InitMessagePool.
var (
	MSGPool []MessagePool
)

// MessagePool is a bounded message queue shared by a limited number of users.
type MessagePool struct {
	l       sync.Mutex // guards user
	maxuser int        // admission limit checked by GetPool
	user    int        // current number of users attached to this pool
	queue   chan *Message
}
// InitMessagePool allocates and initializes every shared message pool.
func InitMessagePool() {
	MSGPool = make([]MessagePool, MessagePoolNum+2)
	for i := range MSGPool {
		MSGPool[i].Init(MessagePoolUser, MessagePoolMessageNum)
	}
}
// Init prepares the pool with a message queue of capacity num and an
// admission limit of maxusernum.
//
// NOTE(review): callers invoke Init(MessagePoolUser, MessagePoolMessageNum),
// so the queue is sized by the user count and the user limit by the message
// count — the arguments look swapped relative to the parameter names;
// confirm which sizing was intended.
func (p *MessagePool) Init(num int, maxusernum int) {
	p.maxuser = maxusernum
	p.queue = make(chan *Message, num)
}
// GetPool reserves a slot in the pool, returning p on success or nil when
// the pool has reached its user limit.
func (p *MessagePool) GetPool() *MessagePool {
	p.l.Lock()
	defer p.l.Unlock()
	if p.user+1 >= p.maxuser {
		return nil
	}
	p.user++
	return p
}
// Reduce releases one previously reserved pool slot.
func (p *MessagePool) Reduce() {
	p.l.Lock()
	defer p.l.Unlock()
	p.user--
}

View File

@@ -1,122 +0,0 @@
package broker
import (
"sync"
"github.com/eclipse/paho.mqtt.golang/packets"
)
// RetainList stores retained messages in a topic-level tree guarded by an
// embedded read/write mutex.
type RetainList struct {
	sync.RWMutex
	root *rlevel
}

// rlevel is one level of the retain tree: child nodes keyed by topic token.
type rlevel struct {
	nodes map[string]*rnode
}

// rnode holds the retained message for a complete topic plus the next level.
type rnode struct {
	next *rlevel
	msg  *packets.PublishPacket
}

// RetainResult accumulates matched retained messages.
type RetainResult struct {
	msg []*packets.PublishPacket
}

// newRNode returns an empty retain node.
func newRNode() *rnode {
	return &rnode{}
}

// newRLevel returns an empty retain level.
func newRLevel() *rlevel {
	return &rlevel{nodes: make(map[string]*rnode)}
}

// NewRetainList returns an empty retain tree.
func NewRetainList() *RetainList {
	return &RetainList{root: newRLevel()}
}
// Insert stores buf as the retained message for topic, creating tree levels
// along the way. It returns an error when topic is not a valid publish topic.
func (r *RetainList) Insert(topic string, buf *packets.PublishPacket) error {
	tokens, err := PublishTopicCheckAndSpilt(topic)
	if err != nil {
		return err
	}
	// log.Info("insert tokens:", tokens)
	r.Lock()
	l := r.root
	var n *rnode
	for _, t := range tokens {
		n = l.nodes[t]
		if n == nil {
			n = newRNode()
			l.nodes[t] = n
		}
		if n.next == nil {
			n.next = newRLevel()
		}
		l = n.next
	}
	n.msg = buf
	r.Unlock()
	return nil
}
// Match returns the retained messages whose topics match the subscription
// filter topic (which may contain wildcards), or nil when the filter is
// invalid.
func (r *RetainList) Match(topic string) []*packets.PublishPacket {
	tokens, err := SubscribeTopicCheckAndSpilt(topic)
	if err != nil {
		return nil
	}

	results := &RetainResult{}

	// matchRLevel only reads the tree, so a read lock suffices and lets
	// concurrent Match calls proceed in parallel.
	r.RLock()
	matchRLevel(r.root, tokens, results)
	r.RUnlock()

	// log.Info("results: ", results)
	return results.msg
}
// matchRLevel walks the retain tree along toks, expanding "#" to every
// message below the current level and "+" to every child at this level,
// appending each matched retained message to results.
func matchRLevel(l *rlevel, toks []string, results *RetainResult) {
	var n *rnode
	for i, t := range toks {
		if l == nil {
			return
		}

		if t == "#" {
			for _, child := range l.nodes {
				child.GetAll(results)
			}
		}

		if t == "+" {
			// BUG FIX: whether "+" is the final level is decided by the
			// remainder of the *token list*; the old code sliced the
			// one-character token itself (t[i+1:]), which panics as soon
			// as i > 0.
			for _, child := range l.nodes {
				if len(toks[i+1:]) == 0 {
					results.msg = append(results.msg, child.msg)
				} else {
					matchRLevel(child.next, toks[i+1:], results)
				}
			}
		}

		n = l.nodes[t]
		if n != nil {
			l = n.next
		} else {
			l = nil
		}
	}
	if n != nil {
		results.msg = append(results.msg, n.msg)
	}
}
// GetAll appends this node's retained message (if any) and those of all
// descendant nodes to results.
func (r *rnode) GetAll(results *RetainResult) {
	if r.msg != nil {
		results.msg = append(results.msg, r.msg)
	}
	// Guard against a node without a next level. Insert currently always
	// allocates one, but a nil dereference here would crash the broker.
	if r.next == nil {
		return
	}
	for _, n := range r.next.nodes {
		n.GetAll(results)
	}
}

53
broker/sesson.go Normal file
View File

@@ -0,0 +1,53 @@
package broker
import "github.com/eclipse/paho.mqtt.golang/packets"
// getSession attaches a Session to cli according to the MQTT CleanSession
// rules and records in resp.SessionPresent whether an existing session was
// resumed.
//
// If CleanSession is set to 0, the server MUST resume communications with the
// client based on state from the current session, as identified by the client
// identifier. If there is no session associated with the client identifier the
// server must create a new session.
//
// If CleanSession is set to 1, the client and server must discard any previous
// session and start a new one. A session lasts as long as the network
// connection. State data associated with a session must not be reused in any
// subsequent session.
func (b *Broker) getSession(cli *client, req *packets.ConnectPacket, resp *packets.ConnackPacket) error {
	var err error

	// Check to see if the client supplied an ID, if not, generate one and set
	// clean session.
	if len(req.ClientIdentifier) == 0 {
		req.CleanSession = true
	}

	cid := req.ClientIdentifier

	// If CleanSession is NOT set, check the session store for existing session.
	// If found, return it.
	if !req.CleanSession {
		if cli.session, err = b.sessionMgr.Get(cid); err == nil {
			resp.SessionPresent = true
			if err := cli.session.Update(req); err != nil {
				return err
			}
		}
	}

	// If CleanSession, or no existing session found, then create a new one
	if cli.session == nil {
		if cli.session, err = b.sessionMgr.New(cid); err != nil {
			return err
		}
		resp.SessionPresent = false
		if err := cli.session.Init(req); err != nil {
			return err
		}
	}

	return nil
}

View File

@@ -1,316 +0,0 @@
package broker
import (
"errors"
"sync"
log "github.com/cihub/seelog"
)
// A result structure better optimized for queue subs.
type SublistResult struct {
	psubs []*subscription
	qsubs []*subscription // don't make this a map, too expensive to iterate
}

// A Sublist stores and efficiently retrieves subscriptions. The cache maps
// publish topics to previously computed match results.
type Sublist struct {
	sync.RWMutex
	cache map[string]*SublistResult
	root  *level
}

// A node contains subscriptions and a pointer to the next level.
type node struct {
	next  *level
	psubs []*subscription
	qsubs []*subscription
}

// A level represents a group of nodes and special pointers to
// wildcard nodes.
type level struct {
	nodes map[string]*node
}

// Create a new default node.
func newNode() *node {
	return &node{psubs: make([]*subscription, 0, 4), qsubs: make([]*subscription, 0, 4)}
}

// Create a new default level. We use FNV1A as the hash
// algortihm for the tokens, which should be short.
func newLevel() *level {
	return &level{nodes: make(map[string]*node)}
}

// New will create a default sublist
func NewSublist() *Sublist {
	return &Sublist{root: newLevel(), cache: make(map[string]*SublistResult)}
}
// Insert adds a subscription into the sublist, creating intermediate tree
// levels as needed. An existing equal subscription is replaced in place.
func (s *Sublist) Insert(sub *subscription) error {
	tokens, err := SubscribeTopicCheckAndSpilt(sub.topic)
	if err != nil {
		return err
	}

	s.Lock()
	// BUG FIX: the early "return nil" paths below (existing-subscription
	// replacement) used to return while still holding the write lock — no
	// defer existed — permanently deadlocking every later Insert/Remove.
	defer s.Unlock()

	l := s.root
	var n *node
	for _, t := range tokens {
		n = l.nodes[t]
		if n == nil {
			n = newNode()
			l.nodes[t] = n
		}
		if n.next == nil {
			n.next = newLevel()
		}
		l = n.next
	}

	if sub.queue {
		//check qsub is already exist
		for i := range n.qsubs {
			if equal(n.qsubs[i], sub) {
				n.qsubs[i] = sub
				return nil
			}
		}
		n.qsubs = append(n.qsubs, sub)
	} else {
		//check psub is already exist
		for i := range n.psubs {
			if equal(n.psubs[i], sub) {
				n.psubs[i] = sub
				return nil
			}
		}
		n.psubs = append(n.psubs, sub)
	}

	topic := string(sub.topic)
	s.addToCache(topic, sub)
	return nil
}
// addToCache appends sub to every cached result whose literal topic
// matches sub's topic. Each affected result is deep-copied first,
// since callers may still hold references to the old slices.
func (s *Sublist) addToCache(topic string, sub *subscription) {
	for key, cached := range s.cache {
		if !matchLiteral(key, topic) {
			continue
		}
		fresh := copyResult(cached)
		if sub.queue {
			fresh.qsubs = append(fresh.qsubs, sub)
		} else {
			fresh.psubs = append(fresh.psubs, sub)
		}
		s.cache[key] = fresh
	}
}
// removeFromCache drops every cached entry whose literal topic
// matches sub's topic. Entries are deleted rather than edited,
// because others may hold references; they repopulate on the next
// Match.
func (s *Sublist) removeFromCache(topic string, sub *subscription) {
	for key := range s.cache {
		if matchLiteral(key, topic) {
			delete(s.cache, key)
		}
	}
}
// matchLiteral reports whether the subscription topic (which may
// contain "+" and "#" wildcards) matches the literal published
// topic. "#" matches the remainder; "+" matches exactly one token.
func matchLiteral(literal, topic string) bool {
	tok, _ := SubscribeTopicCheckAndSpilt(topic)
	li, _ := PublishTopicCheckAndSpilt(literal)
	for i := 0; i < len(tok); i++ {
		b := tok[i]
		switch b {
		case "#":
			// multi-level wildcard: everything below matches
			return true
		case "+":
			// BUGFIX: require a literal token to consume; the original
			// treated a "+" beyond the literal's end as a match
			if i >= len(li) {
				return false
			}
		default:
			// BUGFIX: bounds check before indexing; the original
			// panicked when the literal had fewer tokens than the
			// subscription
			if i >= len(li) || b != li[i] {
				return false
			}
		}
	}
	// NOTE(review): a literal with MORE tokens than the subscription
	// still reports a match here, as in the original -- confirm.
	return true
}
// copyResult returns a deep copy of r's subscription slices so the
// copy can be mutated without disturbing holders of the original.
func copyResult(r *SublistResult) *SublistResult {
	dup := &SublistResult{
		psubs: make([]*subscription, len(r.psubs)),
		qsubs: make([]*subscription, len(r.qsubs)),
	}
	copy(dup.psubs, r.psubs)
	copy(dup.qsubs, r.qsubs)
	return dup
}
// Remove deletes a subscription from the sublist and invalidates any
// cached results it appeared in. It returns an error when the topic
// fails to parse or no matching subscription exists.
func (s *Sublist) Remove(sub *subscription) error {
	tokens, err := SubscribeTopicCheckAndSpilt(sub.topic)
	if err != nil {
		return err
	}

	s.Lock()
	defer s.Unlock()

	// Walk the trie along the topic tokens; a vanished level means
	// the subscription was never inserted.
	var n *node
	l := s.root
	for _, tok := range tokens {
		if l == nil {
			return errors.New("No Matches subscription Found")
		}
		if n = l.nodes[tok]; n == nil {
			l = nil
		} else {
			l = n.next
		}
	}

	if !s.removeFromNode(n, sub) {
		return errors.New("No Matches subscription Found")
	}
	s.removeFromCache(string(sub.topic), sub)
	return nil
}
// removeFromNode removes sub from n's queue or plain subscription
// list (selected by sub.queue) and reports whether it was found.
// A nil node yields false.
func (s *Sublist) removeFromNode(n *node, sub *subscription) (found bool) {
	if n == nil {
		return false
	}
	// the original's trailing `return false` after this if/else was
	// unreachable and has been removed
	if sub.queue {
		n.qsubs, found = removeSubFromList(sub, n.qsubs)
	} else {
		n.psubs, found = removeSubFromList(sub, n.psubs)
	}
	return found
}
// Match returns all subscriptions matching a published topic.
// Results are served from (and stored into) a bounded cache; on a
// cache miss the trie is walked level by level. Returns nil when the
// topic fails publish-topic validation.
func (s *Sublist) Match(topic string) *SublistResult {
	s.RLock()
	rc, ok := s.cache[topic]
	s.RUnlock()
	if ok {
		return rc
	}

	tokens, err := PublishTopicCheckAndSpilt(topic)
	if err != nil {
		log.Error("\tserver/sublist.go: ", err)
		return nil
	}

	result := &SublistResult{}

	s.Lock()
	l := s.root
	if len(tokens) > 0 {
		if tokens[0] == "/" {
			// topics with a leading "/" get special root handling
			if _, exist := l.nodes["#"]; exist {
				addNodeToResults(l.nodes["#"], result)
			}
			if _, exist := l.nodes["+"]; exist {
				// BUGFIX: the original descended l.nodes["/"].next here,
				// which is the wrong node and panics when "/" is absent
				matchLevel(l.nodes["+"].next, tokens[1:], result)
			}
			if _, exist := l.nodes["/"]; exist {
				matchLevel(l.nodes["/"].next, tokens[1:], result)
			}
		} else {
			matchLevel(s.root, tokens, result)
		}
	}

	s.cache[topic] = result
	// crude cache bound: evict one arbitrary entry once over 1024
	if len(s.cache) > 1024 {
		for k := range s.cache {
			delete(s.cache, k)
			break
		}
	}
	s.Unlock()
	return result
}
// matchLevel walks one trie level per topic token, accumulating
// matching subscriptions into results. "#" matches the remainder of
// the topic; "+" matches exactly one token and its subtree is
// followed recursively.
func matchLevel(l *level, toks []string, results *SublistResult) {
	var swc, n *node
	exist := false
	for i, t := range toks {
		if l == nil {
			return
		}
		if _, exist = l.nodes["#"]; exist {
			addNodeToResults(l.nodes["#"], results)
		}
		if t != "/" {
			// remember the "+" node so its own subs are added below
			// when it consumes the final token
			if swc, exist = l.nodes["+"]; exist {
				matchLevel(l.nodes["+"].next, toks[i+1:], results)
			}
		} else {
			if _, exist = l.nodes["+"]; exist {
				addNodeToResults(l.nodes["+"], results)
			}
		}
		n = l.nodes[t]
		if n != nil {
			l = n.next
		} else {
			l = nil
		}
	}
	if n != nil {
		addNodeToResults(n, results)
	}
	if swc != nil {
		// BUGFIX: the original added n here (duplicating its subs, or
		// panicking when n was nil); the single-level wildcard node's
		// own subscriptions are what must be added
		addNodeToResults(swc, results)
	}
}
// addNodeToResults merges a node's plain and queue subscriptions
// into the running result set.
func addNodeToResults(n *node, results *SublistResult) {
	for _, ps := range n.psubs {
		results.psubs = append(results.psubs, ps)
	}
	for _, qs := range n.qsubs {
		results.qsubs = append(results.qsubs, qs)
	}
}
// removeSubFromList removes sub from sl by swapping in the last
// element (order is not preserved) and nilling the freed slot so the
// GC can reclaim it. It returns the possibly shrunk slice and
// whether sub was present.
func removeSubFromList(sub *subscription, sl []*subscription) ([]*subscription, bool) {
	for i, cur := range sl {
		if cur != sub {
			continue
		}
		last := len(sl) - 1
		sl[i], sl[last] = sl[last], nil
		return shrinkAsNeeded(sl[:last]), true
	}
	return sl, false
}
// shrinkAsNeeded reallocates sl when more than half of a non-trivial
// backing array sits unused, releasing memory after a large growth
// followed by many unsubscribes.
func shrinkAsNeeded(sl []*subscription) []*subscription {
	length, capacity := len(sl), cap(sl)
	// small lists are not worth reallocating
	if capacity <= 8 {
		return sl
	}
	if free := float32(capacity-length) / float32(capacity); free > 0.50 {
		compact := make([]*subscription, length)
		copy(compact, sl)
		return compact
	}
	return sl
}

24
broker/usage.go Normal file
View File

@@ -0,0 +1,24 @@
package broker
// usageStr is the command-line help text printed by the hmq broker.
// Typos fixed: "perfer" -> "prefer", "who maintenance" -> "that maintains".
var usageStr = `
Usage: hmq [options]
Broker Options:
-w,  --worker <number>            Worker num to process message, prefer (client num)/10. (default 1024)
-p, --port <port>                 Use port for clients (default: 1883)
--host <host>                 Network host to listen on. (default "0.0.0.0")
-ws, --wsport <port>              Use port for websocket monitoring
-wsp,--wspath <path>              Use path for websocket monitoring
-c, --config <file>               Configuration file
Logging Options:
-d, --debug <bool>                Enable debugging output (default false)
-D                                Debug and trace
Cluster Options:
-r,  --router  <rurl>             Router that maintains cluster info
-cp, --clusterport <cluster-port> Cluster listen port for others
Common Options:
-h, --help                        Show this message
`

View File

@@ -1,37 +0,0 @@
package broker
// Worker pulls Messages from its own MsgChannel and processes them.
// WorkerPool is the shared registry a dispatcher draws idle workers
// from; quit signals the run loop to exit.
type Worker struct {
WorkerPool chan chan *Message
MsgChannel chan *Message
quit chan bool
}
// NewWorker builds a Worker wired to the given shared pool, with a
// fresh (unbuffered) message channel and quit channel.
func NewWorker(workerPool chan chan *Message) Worker {
	w := Worker{WorkerPool: workerPool}
	w.MsgChannel = make(chan *Message)
	w.quit = make(chan bool)
	return w
}
// Start launches the worker's run loop in its own goroutine. Each
// iteration re-registers MsgChannel in the shared pool, then blocks
// until either a message arrives (handled by ProcessMessage) or a
// quit signal is received.
func (w Worker) Start() {
go func() {
for {
// register the current worker into the worker queue.
w.WorkerPool <- w.MsgChannel
select {
case msg := <-w.MsgChannel:
// we have received a work request.
ProcessMessage(msg)
case <-w.quit:
// NOTE(review): MsgChannel was already re-registered above, so a
// dispatcher may still hand a message to this stopped worker --
// confirm whether dropping that message is acceptable.
return
}
}
}()
}
// Stop signals the worker to stop listening for work requests. The
// send is performed on its own goroutine so Stop never blocks the
// caller, even while the run loop is busy processing a message.
func (w Worker) Stop() {
	go func() { w.quit <- true }()
}

View File

@@ -2,11 +2,13 @@
"workerNum": 4096,
"port": "1883",
"host": "0.0.0.0",
"debug": true,
"cluster": {
"host": "0.0.0.0",
"port": "1993",
"routes": []
"port": "1993"
},
"httpPort": "8080",
"router": "127.0.0.1:9888",
"tlsPort": "8883",
"tlsHost": "0.0.0.0",
"wsPort": "1888",
@@ -18,6 +20,8 @@
"certFile": "ssl/server/cert.pem",
"keyFile": "ssl/server/key.pem"
},
"acl": false,
"aclConf": "conf/acl.conf"
"plugins": {
"auth": "authhttp",
"bridge": "kafka"
}
}

37
deploy/config.yaml Normal file
View File

@@ -0,0 +1,37 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: mqtt-broker
data:
hmq.config: |
{
"workerNum": 4096,
"port": "1883",
"host": "0.0.0.0",
"plugins": ["authhttp","kafka"]
}
kafka.json: |
{
"addr": [
"127.0.0.1:9090"
],
"onConnect": "onConnect",
"onPublish": "onPublish",
"onSubscribe": "onSubscribe",
"onDisconnect": "onDisconnect",
"onUnsubscribe": "onUnsubscribe",
"deliverMap": {
"#": "publish",
"/upload/+/#": "upload"
}
}
authhttp.json: |
{
"auth": "http://127.0.0.1:9090/mqtt/auth",
"acl": "http://127.0.0.1:9090/mqtt/acl",
"super": "http://127.0.0.1:9090/mqtt/superuser"
}

44
deploy/deploy.yaml Normal file
View File

@@ -0,0 +1,44 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mqtt-broker
spec:
selector:
matchLabels:
app: mqtt-broker
replicas: 1
template:
metadata:
labels:
app: mqtt-broker
spec:
containers:
- name: mqtt-broker
image: uhub.service.ucloud.cn/uiot_core_hub/hmq:v0.1.0
ports:
- containerPort: 1883
- containerPort: 8080
volumeMounts:
- name: mqtt-broker
mountPath: /conf
subPath: hmq.config
- name: mqtt-broker
mountPath: /plugins/kafka/kafka.json
subPath: kafka.json
- name: mqtt-broker
mountPath: /plugins/auth/authhttp/http.json
subPath: http.json
volumes:
- name: mqtt-broker
configMap:
name: mqtt-broker
items:
- key: hmq.config
  path: hmq.config
- key: authhttp.json
  path: http.json
- key: kafka.json
  path: kafka.json

13
deploy/svc.yaml Normal file
View File

@@ -0,0 +1,13 @@
kind: Service
apiVersion: v1
metadata:
name: mqtt-broker
spec:
selector:
app: mqtt-broker
ports:
- protocol: TCP
port: 1883
targetPort: 1883
type: ClusterIP
sessionAffinity: ClientIP

27
go.mod Normal file
View File

@@ -0,0 +1,27 @@
module github.com/fhmq/hmq
go 1.12
require (
github.com/Shopify/sarama v1.26.1
github.com/bitly/go-simplejson v0.5.0
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
github.com/eclipse/paho.mqtt.golang v1.2.0
github.com/gin-gonic/gin v1.4.0
github.com/golang/protobuf v1.3.2 // indirect
github.com/kr/pretty v0.1.0 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/pkg/errors v0.8.1 // indirect
github.com/satori/go.uuid v1.2.0
github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e
github.com/stretchr/testify v1.3.0
github.com/tidwall/gjson v1.3.0
go.uber.org/atomic v1.4.0 // indirect
go.uber.org/multierr v1.1.0 // indirect
go.uber.org/zap v1.10.0
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 // indirect
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80
golang.org/x/sys v0.0.0-20190730183949-1393eb018365 // indirect
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
)

119
go.sum Normal file
View File

@@ -0,0 +1,119 @@
github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 h1:2T/jmrHeTezcCM58lvEQXs0UpQJCo5SoGAcg+mbSTIg=
github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/Shopify/sarama v1.23.0 h1:slvlbm7bxyp7sKQbUwha5BQdZTqurhRoI+zbKorVigQ=
github.com/Shopify/sarama v1.23.0/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs=
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0=
github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3 h1:t8FVkw33L+wilf2QiWkw0UV77qRpcH/JHPKGpKa2E8g=
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
github.com/gin-gonic/gin v1.4.0 h1:3tMoCCfM7ppqsR0ptz/wi1impNpT7/9wQtMZ8lr1mCQ=
github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM=
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41 h1:GeinFsrjWz97fAxVUEd748aV0cYL+I6k44gFJTCVvpU=
github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e h1:uO75wNGioszjmIzcY/tvdDYKRLVvzggtAmmJkn9j4GQ=
github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e/go.mod h1:tm/wZFQ8e24NYaBGIlnO2WGCAi67re4HHuOm0sftE/M=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/tidwall/gjson v1.3.0 h1:kfpsw1W3trbg4Xm6doUtqSl9+LhLB6qJ9PkltVAQZYs=
github.com/tidwall/gjson v1.3.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c h1:uOCk1iQW6Vc18bnC13MfzScl+wdKBmM9Y9kU7Z83/lw=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190730183949-1393eb018365 h1:SaXEMXhWzMJThc05vu6uh61Q245r4KaWMrsTedk0FDc=
golang.org/x/sys v0.0.0-20190730183949-1393eb018365/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v8 v8.18.2 h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ=
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI=
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
gopkg.in/jcmturner/gokrb5.v7 v7.2.3 h1:hHMV/yKPwMnJhPuPx7pH2Uw/3Qyf+thJYlisUc44010=
gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

64
logger/logger.go Normal file
View File

@@ -0,0 +1,64 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package logger
import (
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
var (
// env can be setup at build time with Go Linker. Value could be prod or whatever else for dev env
instance *zap.Logger // lazily-built process-wide logger (see Get)
logCfg zap.Config // NOTE(review): shadowed by locals in New*Logger below; appears unused
encoderCfg = zap.NewProductionEncoderConfig() // shared encoder settings, adjusted in init
)
// init adjusts the shared encoder config so every logger built in
// this package emits an ISO8601 "timestamp" field instead of zap's
// default epoch-seconds "ts" field.
func init() {
encoderCfg.TimeKey = "timestamp"
encoderCfg.EncodeTime = zapcore.ISO8601TimeEncoder
}
// NewDevLogger return a logger for dev builds: production-style JSON
// output with the level lowered to Debug and the shared encoder
// config applied.
func NewDevLogger() (*zap.Logger, error) {
	cfg := zap.NewProductionConfig()
	cfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
	cfg.EncoderConfig = encoderCfg
	return cfg.Build()
}
// NewProdLogger return a logger for production builds: Info level,
// stacktraces disabled, shared encoder config applied.
func NewProdLogger() (*zap.Logger, error) {
	cfg := zap.NewProductionConfig()
	cfg.DisableStacktrace = true
	cfg.Level = zap.NewAtomicLevelAt(zap.InfoLevel)
	cfg.EncoderConfig = encoderCfg
	return cfg.Build()
}
// Prod builds a production logger, installs it as the package
// singleton, and returns it. The build error is discarded, matching
// the original behavior.
func Prod() *zap.Logger {
	instance, _ = NewProdLogger()
	return instance
}
// Debug builds a dev (debug-level) logger, installs it as the
// package singleton, and returns it. The build error is discarded,
// matching the original behavior.
func Debug() *zap.Logger {
	instance, _ = NewDevLogger()
	return instance
}
// Get returns the package singleton logger, lazily building a
// production logger on first use.
// NOTE(review): not goroutine-safe -- concurrent first calls may
// each build and assign; confirm initialization happens from a
// single goroutine (e.g. via Prod/Debug at startup).
func Get() *zap.Logger {
if instance == nil {
l, _ := NewProdLogger()
instance = l
}
return instance
}

33
logger/logger_test.go Normal file
View File

@@ -0,0 +1,33 @@
/*
Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package logger
import (
"testing"
"github.com/stretchr/testify/assert"
"go.uber.org/zap"
)
// TestGet verifies that Get lazily builds and returns a non-nil
// *zap.Logger.
func TestGet(t *testing.T) {
var l *zap.Logger
logger := Get()
assert.NotNil(t, logger)
assert.IsType(t, l, logger)
}
// TestNewDevLogger verifies the dev logger builds without error and
// has Debug-level logging enabled.
func TestNewDevLogger(t *testing.T) {
logger, err := NewDevLogger()
assert.Nil(t, err)
assert.True(t, logger.Core().Enabled(zap.DebugLevel))
}
// TestNewProdLogger verifies the production logger builds without
// error and suppresses Debug-level output.
func TestNewProdLogger(t *testing.T) {
logger, err := NewProdLogger()
assert.Nil(t, err)
assert.False(t, logger.Core().Enabled(zap.DebugLevel))
}

34
main.go
View File

@@ -1,49 +1,29 @@
package main
import (
"hmq/broker"
"log"
"os"
"os/signal"
"runtime"
log "github.com/cihub/seelog"
"github.com/fhmq/hmq/broker"
)
func init() {
testConfig := `
<seelog type="sync">
<outputs formatid="main">
<console/>
</outputs>
<formats>
<format id="main" format="Time:%Date %Time%tfile:%File%tlevel:%LEVEL%t%Msg%n"/>
</formats>
</seelog>`
logger, err := log.LoggerFromConfigAsBytes([]byte(testConfig))
if err != nil {
panic(err)
}
log.ReplaceLogger(logger)
}
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
config, er := broker.LoadConfig()
if er != nil {
log.Error("Load Config file error: ", er)
return
config, err := broker.ConfigureConfig(os.Args[1:])
if err != nil {
log.Fatal("configure broker config error: ", err)
}
b, err := broker.NewBroker(config)
if err != nil {
log.Error("New Broker error: ", er)
return
log.Fatal("New Broker error: ", err)
}
b.Start()
s := waitForSignal()
log.Infof("signal got: %v ,broker closed.", s)
log.Println("signal received, broker closed.", s)
}
func waitForSignal() os.Signal {

27
plugins/auth/auth.go Normal file
View File

@@ -0,0 +1,27 @@
package auth
import (
authfile "github.com/fhmq/hmq/plugins/auth/authfile"
"github.com/fhmq/hmq/plugins/auth/authhttp"
)
// Names of the built-in authentication plugin implementations,
// matched by NewAuth against the broker's configured plugin name.
const (
AuthHTTP = "authhttp"
AuthFile = "authfile"
)
// Auth is the contract every authentication plugin implements:
// CheckACL authorizes an action on a topic for a client, and
// CheckConnect authorizes an MQTT CONNECT by credentials.
type Auth interface {
CheckACL(action, clientID, username, ip, topic string) bool
CheckConnect(clientID, username, password string) bool
}
// NewAuth maps a configured plugin name to its implementation.
// Unknown names fall back to the mock implementation.
func NewAuth(name string) Auth {
	if name == AuthHTTP {
		return authhttp.Init()
	}
	if name == AuthFile {
		return authfile.Init()
	}
	return &mockAuth{}
}

View File

@@ -0,0 +1,54 @@
## ACL Configure
```
Attention: the ACL action codes have changed: the old mapping `pub = 1, sub = 2` is now `sub = 1, pub = 2`.
```
#### The ACL rules define:
~~~
Allow | type | value | pubsub | Topics
~~~
#### ACL Config
~~~
## type clientid , username, ipaddr
##sub 1 , pub 2, pubsub 3
## %c is clientid , %u is username
allow ip 127.0.0.1 1 $SYS/#
allow clientid 0001 3 #
allow username admin 3 #
allow username joy 3 /test,hello/world
allow clientid * 2 toCloud/%c
allow username * 2 toCloud/%u
deny clientid * 3 #
~~~
~~~
#allow local sub $SYS topic
allow ip 127.0.0.1 1 $SYS/#
~~~
~~~
#allow client who's id with 0001 or username with admin pub sub all topic
allow clientid 0001 3 #
allow username admin 3 #
~~~
~~~
#allow client with the username joy can pub sub topic '/test' and 'hello/world'
allow username joy 3 /test,hello/world
~~~
~~~
#allow all client pub the topic toCloud/{clientid/username}
allow clientid * 2 toCloud/%c
allow username * 2 toCloud/%u
~~~
~~~
#deny all client pub sub all topic
deny clientid * 3 #
~~~
Clients are matched against the ACL rules one by one:
~~~
--------- --------- ---------
Client -> | Rule1 | --nomatch--> | Rule2 | --nomatch--> | Rule3 | -->
--------- --------- ---------
| | |
match match match
\|/ \|/ \|/
allow | deny allow | deny allow | deny
~~~

View File

@@ -1,4 +1,4 @@
## pub 1 , sub 2, pubsub 3
## sub 1 , pub 2, pubsub 3
## %c is clientid , %s is username
##auth type value pub/sub topic
allow ip 127.0.0.1 2 $SYS/#
@@ -9,4 +9,4 @@ allow clientid * 1 toCloud/%c
allow username * 1 toCloud/%u
allow clientid * 2 toDevice/%c
allow username * 2 toDevice/%u
deny clientid * 3 #
deny clientid * 3 #

View File

@@ -0,0 +1,23 @@
package acl
// aclAuth implements the broker's Auth interface using rules loaded
// from a static ACL configuration file.
type aclAuth struct {
config *ACLConfig
}
// Init loads the file-based ACL configuration and returns an
// authenticator backed by it. A missing or malformed file panics,
// failing broker startup fast.
func Init() *aclAuth {
	cfg, err := AclConfigLoad("./plugins/auth/authfile/acl.conf")
	if err != nil {
		panic(err)
	}
	return &aclAuth{config: cfg}
}
// CheckConnect unconditionally accepts the connection: this plugin
// enforces topic ACLs only (see CheckACL), not credentials.
func (a *aclAuth) CheckConnect(clientID, username, password string) bool {
return true
}
// CheckACL authorizes action (pub/sub) on topic by evaluating the
// loaded ACL rules against the client's IP, username, and client ID.
func (a *aclAuth) CheckACL(action, clientID, username, ip, topic string) bool {
	// BUGFIX: checkTopicAuth's signature is (config, action, ip,
	// username, clientid, topic); the original passed username and ip
	// swapped, so username rules were matched against the IP and
	// IP rules against the username.
	return checkTopicAuth(a.config, action, ip, username, clientID, topic)
}

View File

@@ -2,20 +2,20 @@ package acl
import "strings"
func CheckTopicAuth(ACLInfo *ACLConfig, typ int, ip, username, clientid, topic string) bool {
func checkTopicAuth(ACLInfo *ACLConfig, action, ip, username, clientid, topic string) bool {
for _, info := range ACLInfo.Info {
ctyp := info.Typ
switch ctyp {
case CLIENTID:
if match, auth := info.checkWithClientID(typ, clientid, topic); match {
if match, auth := info.checkWithClientID(action, clientid, topic); match {
return auth
}
case USERNAME:
if match, auth := info.checkWithUsername(typ, username, topic); match {
if match, auth := info.checkWithUsername(action, username, topic); match {
return auth
}
case IP:
if match, auth := info.checkWithIP(typ, ip, topic); match {
if match, auth := info.checkWithIP(action, ip, topic); match {
return auth
}
}
@@ -23,18 +23,18 @@ func CheckTopicAuth(ACLInfo *ACLConfig, typ int, ip, username, clientid, topic s
return false
}
func (a *AuthInfo) checkWithClientID(typ int, clientid, topic string) (bool, bool) {
func (a *AuthInfo) checkWithClientID(action, clientid, topic string) (bool, bool) {
auth := false
match := false
if a.Val == "*" || a.Val == clientid {
for _, tp := range a.Topics {
des := strings.Replace(tp, "%c", clientid, -1)
if typ == PUB {
if action == PUB {
if pubTopicMatch(topic, des) {
match = true
auth = a.checkAuth(PUB)
}
} else if typ == SUB {
} else if action == SUB {
if subTopicMatch(topic, des) {
match = true
auth = a.checkAuth(SUB)
@@ -45,18 +45,18 @@ func (a *AuthInfo) checkWithClientID(typ int, clientid, topic string) (bool, boo
return match, auth
}
func (a *AuthInfo) checkWithUsername(typ int, username, topic string) (bool, bool) {
func (a *AuthInfo) checkWithUsername(action, username, topic string) (bool, bool) {
auth := false
match := false
if a.Val == "*" || a.Val == username {
for _, tp := range a.Topics {
des := strings.Replace(tp, "%u", username, -1)
if typ == PUB {
if action == PUB {
if pubTopicMatch(topic, des) {
match = true
auth = a.checkAuth(PUB)
}
} else if typ == SUB {
} else if action == SUB {
if subTopicMatch(topic, des) {
match = true
auth = a.checkAuth(SUB)
@@ -67,18 +67,18 @@ func (a *AuthInfo) checkWithUsername(typ int, username, topic string) (bool, boo
return match, auth
}
func (a *AuthInfo) checkWithIP(typ int, ip, topic string) (bool, bool) {
func (a *AuthInfo) checkWithIP(action, ip, topic string) (bool, bool) {
auth := false
match := false
if a.Val == "*" || a.Val == ip {
for _, tp := range a.Topics {
des := tp
if typ == PUB {
if action == PUB {
if pubTopicMatch(topic, des) {
auth = a.checkAuth(PUB)
match = true
}
} else if typ == SUB {
} else if action == SUB {
if subTopicMatch(topic, des) {
auth = a.checkAuth(SUB)
match = true
@@ -89,15 +89,15 @@ func (a *AuthInfo) checkWithIP(typ int, ip, topic string) (bool, bool) {
return match, auth
}
func (a *AuthInfo) checkAuth(typ int) bool {
func (a *AuthInfo) checkAuth(action string) bool {
auth := false
if typ == PUB {
if action == PUB {
if a.Auth == ALLOW && (a.PubSub == PUB || a.PubSub == PUBSUB) {
auth = true
} else if a.Auth == DENY && a.PubSub == SUB {
auth = true
}
} else if typ == SUB {
} else if action == SUB {
if a.Auth == ALLOW && (a.PubSub == SUB || a.PubSub == PUBSUB) {
auth = true
} else if a.Auth == DENY && a.PubSub == PUB {

View File

@@ -5,14 +5,13 @@ import (
"errors"
"io"
"os"
"strconv"
"strings"
)
const (
PUB = 1
SUB = 2
PUBSUB = 3
SUB = "1"
PUB = "2"
PUBSUB = "3"
CLIENTID = "clientid"
USERNAME = "username"
IP = "ip"
@@ -24,7 +23,7 @@ type AuthInfo struct {
Auth string
Typ string
Val string
PubSub int
PubSub string
Topics []string
}
@@ -34,9 +33,6 @@ type ACLConfig struct {
}
func AclConfigLoad(file string) (*ACLConfig, error) {
if file == "" {
file = "./conf/acl.conf"
}
aclconifg := &ACLConfig{
File: file,
Info: make([]*AuthInfo, 0, 4),
@@ -79,12 +75,16 @@ func (c *ACLConfig) Prase() error {
parseErr = errors.New("\"" + line + "\" format is error")
break
}
var pubsub int
pubsub, err = strconv.Atoi(tmpArr[3])
if err != nil {
if tmpArr[3] != PUB && tmpArr[3] != SUB && tmpArr[3] != PUBSUB {
parseErr = errors.New("\"" + line + "\" format is error")
break
}
// var pubsub int
// pubsub, err = strconv.Atoi(tmpArr[3])
// if err != nil {
// parseErr = errors.New("\"" + line + "\" format is error")
// break
// }
topicStr := strings.Replace(tmpArr[4], " ", "", -1)
topicStr = strings.Replace(topicStr, "\n", "", -1)
topics := strings.Split(topicStr, ",")
@@ -93,7 +93,7 @@ func (c *ACLConfig) Prase() error {
Typ: tmpArr[1],
Val: tmpArr[2],
Topics: topics,
PubSub: pubsub,
PubSub: tmpArr[3],
}
c.Info = append(c.Info, tmpAuth)
if err != nil {

View File

@@ -0,0 +1,179 @@
package authhttp
import (
"encoding/json"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/fhmq/hmq/logger"
"go.uber.org/zap"
)
// Config holds the HTTP auth backend endpoints: AuthURL validates
// CONNECTs, ACLURL authorizes pub/sub actions, and SuperURL checks
// superuser status (currently unused; see the commented-out
// CheckSuper below).
type Config struct {
AuthURL string `json:"auth"`
ACLURL string `json:"acl"`
SuperURL string `json:"super"`
}
// authHTTP implements the broker's Auth interface by delegating
// connect and ACL checks to the configured HTTP endpoints.
type authHTTP struct {
client *http.Client
}
var (
config Config // endpoint URLs, populated from http.json in Init
log = logger.Get().Named("authhttp") // package-scoped named logger
httpClient *http.Client // shared client built in Init; reused for all requests
)
// Init reads ./plugins/auth/authhttp/http.json into the package
// config, builds the shared HTTP client, and returns the
// authenticator. Config errors are fatal: the broker cannot
// authenticate without the endpoints.
func Init() *authHTTP {
	raw, err := ioutil.ReadFile("./plugins/auth/authhttp/http.json")
	if err != nil {
		log.Fatal("Read config file error: ", zap.Error(err))
	}
	if err = json.Unmarshal(raw, &config); err != nil {
		log.Fatal("Unmarshal config file error: ", zap.Error(err))
	}
	httpClient = &http.Client{
		Transport: &http.Transport{
			MaxConnsPerHost:     100,
			MaxIdleConns:        100,
			MaxIdleConnsPerHost: 100,
		},
		// generous ceiling; per-request latency is governed by the backend
		Timeout: time.Second * 100,
	}
	return &authHTTP{client: httpClient}
}
// CheckConnect authorizes an MQTT CONNECT by POSTing the credentials
// to the configured auth endpoint. Successful checks are cached so
// repeated connects with the same credentials skip the round trip.
func (a *authHTTP) CheckConnect(clientID, username, password string) bool {
	const action = "connect"

	// fast path: a previous identical, successful check
	if c := checkCache(action, clientID, username, password, ""); c != nil {
		if c.action == action && c.username == username && c.password == password {
			return true
		}
	}

	form := url.Values{}
	form.Add("username", username)
	form.Add("clientid", clientID)
	form.Add("password", password)
	body := form.Encode()

	req, err := http.NewRequest("POST", config.AuthURL, strings.NewReader(body))
	if err != nil {
		log.Error("new request super: ", zap.Error(err))
		return false
	}
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	req.Header.Add("Content-Length", strconv.Itoa(len(body)))

	resp, err := a.client.Do(req)
	if err != nil {
		log.Error("request super: ", zap.Error(err))
		return false
	}
	defer resp.Body.Close()
	// drain the body so the transport can reuse the connection
	io.Copy(ioutil.Discard, resp.Body)

	if resp.StatusCode != http.StatusOK {
		return false
	}
	addCache(action, clientID, username, password, "")
	return true
}
// //CheckSuper checks mqtt superuser credentials against config.SuperURL
// func CheckSuper(clientID, username, password string) bool {
// action := "connect"
// {
// aCache := checkCache(action, clientID, username, password, "")
// if aCache != nil {
// if aCache.password == password && aCache.username == username && aCache.action == action {
// return true
// }
// }
// }
// data := url.Values{}
// data.Add("username", username)
// data.Add("clientid", clientID)
// data.Add("password", password)
// req, err := http.NewRequest("POST", config.SuperURL, strings.NewReader(data.Encode()))
// if err != nil {
// log.Error("new request super: ", zap.Error(err))
// return false
// }
// req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
// req.Header.Add("Content-Length", strconv.Itoa(len(data.Encode())))
// resp, err := httpClient.Do(req)
// if err != nil {
// log.Error("request super: ", zap.Error(err))
// return false
// }
// defer resp.Body.Close()
// io.Copy(ioutil.Discard, resp.Body)
// if resp.StatusCode == http.StatusOK {
// return true
// }
// return false
// }
// CheckACL asks the configured ACL endpoint whether username may perform
// action (publish/subscribe) on topic. Successful decisions are cached;
// clientID and ip are accepted for interface compatibility but are not
// sent to the endpoint. Returns true only on a cached hit or an HTTP 200.
func (a *authHTTP) CheckACL(action, clientID, username, ip, topic string) bool {
	// Fast path: a previously granted decision for this user/topic/action.
	{
		aCache := checkCache(action, "", username, "", topic)
		if aCache != nil {
			if aCache.topic == topic && aCache.action == action {
				return true
			}
		}
	}
	req, err := http.NewRequest("GET", config.ACLURL, nil)
	if err != nil {
		log.Error("get acl: ", zap.Error(err))
		return false
	}
	// Parameters travel in the query string (GET request, no body).
	data := req.URL.Query()
	data.Add("username", username)
	data.Add("topic", topic)
	data.Add("access", action)
	req.URL.RawQuery = data.Encode()
	// fmt.Println("req:", req)
	resp, err := a.client.Do(req)
	if err != nil {
		log.Error("request acl: ", zap.Error(err))
		return false
	}
	defer resp.Body.Close()
	// Drain the body so the transport can reuse the connection.
	io.Copy(ioutil.Discard, resp.Body)
	if resp.StatusCode == http.StatusOK {
		addCache(action, "", username, "", topic)
		return true
	}
	return false
}

View File

@@ -0,0 +1,32 @@
package authhttp
import (
"time"
"github.com/patrickmn/go-cache"
)
// authCache is one cached auth decision: the action that was granted and
// the credentials/topic it was granted for. Callers re-compare the
// relevant fields before trusting a cache hit.
type authCache struct {
	action   string // "connect", "publish", "subscribe", ...
	username string
	clientID string
	password string // plaintext as supplied on CONNECT; compared by CheckConnect
	topic    string // set only for ACL entries
}
var (
	// cache = make(map[string]authCache)
	// c holds successful auth decisions for 5 minutes; expired entries
	// are purged every 10 minutes.
	c = cache.New(5*time.Minute, 10*time.Minute)
)
// cacheKey builds the cache key from every discriminating field. Keying
// on username alone (as before) let a connect entry and an ACL entry for
// the same user overwrite each other; the composite key keeps them
// separate. The password is deliberately excluded — callers compare it
// against the cached entry instead of trusting the key.
func cacheKey(action, clientID, username, topic string) string {
	return action + "\x00" + clientID + "\x00" + username + "\x00" + topic
}

// checkCache returns the cached decision for this action/client/user/topic
// combination, or nil when nothing is cached.
func checkCache(action, clientID, username, password, topic string) *authCache {
	authc, found := c.Get(cacheKey(action, clientID, username, topic))
	if found {
		return authc.(*authCache)
	}
	return nil
}

// addCache records a successful auth decision with the default TTL.
func addCache(action, clientID, username, password, topic string) {
	c.Set(cacheKey(action, clientID, username, topic), &authCache{action: action, username: username, clientID: clientID, password: password, topic: topic}, cache.DefaultExpiration)
}

View File

@@ -0,0 +1,5 @@
{
"auth": "http://127.0.0.1:9090/mqtt/auth",
"acl": "http://127.0.0.1:9090/mqtt/acl",
"super": "http://127.0.0.1:9090/mqtt/superuser"
}

11
plugins/auth/mock.go Normal file
View File

@@ -0,0 +1,11 @@
package auth
// mockAuth is the no-op auth backend: every connection and every ACL
// request is accepted.
type mockAuth struct{}

// CheckACL always grants access.
func (m *mockAuth) CheckACL(action, clientID, username, ip, topic string) bool {
	return true
}

// CheckConnect always accepts the connection.
func (m *mockAuth) CheckConnect(clientID, username, password string) bool {
	return true
}

49
plugins/bridge/bridge.go Normal file
View File

@@ -0,0 +1,49 @@
package bridge
import "github.com/fhmq/hmq/logger"
// MQTT lifecycle actions reported to the bridge. The values double as the
// Elements.Action field and the authhttp action names.
const (
	//Connect mqtt connect
	Connect = "connect"
	//Publish mqtt publish
	Publish = "publish"
	//Subscribe mqtt subscribe
	Subscribe = "subscribe"
	//Unsubscribe mqtt unsubscribe
	Unsubscribe = "unsubscribe"
	//Disconnect mqtt disconnect
	Disconnect = "disconnect"
)
var (
	// log is the shared named logger for all bridge backends.
	log = logger.Get().Named("bridge")
)
// Elements is the broker event forwarded to a bridge backend: who did
// what, on which topic, with what payload. It is JSON-encoded before
// being handed to the external message queue.
type Elements struct {
	ClientID  string `json:"clientid"`
	Username  string `json:"username"`
	Topic     string `json:"topic"`
	Payload   string `json:"payload"`
	Timestamp int64  `json:"ts"`   // event time, unix seconds — TODO confirm unit at the caller
	Size      int32  `json:"size"` // payload size in bytes — presumably; verify against caller
	Action    string `json:"action"`
}
const (
	//Kafka plugin name accepted by NewBridgeMQ
	Kafka = "kafka"
)
// BridgeMQ is the contract every bridge backend implements: deliver one
// broker event to the external message queue.
type BridgeMQ interface {
	Publish(e *Elements) error
}
// NewBridgeMQ returns the bridge backend selected by name. Any name other
// than Kafka yields the no-op mock bridge.
func NewBridgeMQ(name string) BridgeMQ {
	if name == Kafka {
		return InitKafka()
	}
	return &mockMQ{}
}

156
plugins/bridge/kafka.go Normal file
View File

@@ -0,0 +1,156 @@
package bridge
import (
"encoding/json"
"errors"
"io/ioutil"
"strings"
"time"
"github.com/Shopify/sarama"
"go.uber.org/zap"
)
// kafakConfig mirrors the kafka bridge JSON config: broker addresses, the
// kafka topic for each MQTT event type (empty string disables that event),
// and a pattern→topic map for routing published messages.
// NOTE(review): "kafak" is a typo kept as-is; it is private to this package.
type kafakConfig struct {
	Addr             []string          `json:"addr"`
	ConnectTopic     string            `json:"onConnect"`
	SubscribeTopic   string            `json:"onSubscribe"`
	PublishTopic     string            `json:"onPublish"`
	UnsubscribeTopic string            `json:"onUnsubscribe"`
	DisconnectTopic  string            `json:"onDisconnect"`
	DeliverMap       map[string]string `json:"deliverMap"` // MQTT topic filter -> kafka topic
}
// kafka is the Kafka bridge backend: its config plus the async producer.
type kafka struct {
	kafakConfig kafakConfig
	kafkaClient sarama.AsyncProducer
}
// InitKafka loads the kafka bridge configuration, connects the async
// producer and returns the ready bridge. It terminates the process
// (log.Fatal) on a missing/invalid config file or connection failure.
func InitKafka() *kafka {
	log.Info("start connect kafka....")
	// NOTE(review): the config is read from ./plugins/kafka/kafka.json even
	// though this package lives under plugins/bridge — confirm the path.
	content, err := ioutil.ReadFile("./plugins/kafka/kafka.json")
	if err != nil {
		log.Fatal("Read config file error: ", zap.Error(err))
	}
	// log.Info(string(content))
	var config kafakConfig
	err = json.Unmarshal(content, &config)
	if err != nil {
		log.Fatal("Unmarshal config file error: ", zap.Error(err))
	}
	c := &kafka{kafakConfig: config}
	c.connect()
	return c
}
// connect creates the sarama async producer for the configured brokers
// and starts a goroutine that logs asynchronous send errors. It is fatal
// if the producer cannot be created.
func (k *kafka) connect() {
	conf := sarama.NewConfig()
	conf.Version = sarama.V1_1_1_0
	kafkaClient, err := sarama.NewAsyncProducer(k.kafakConfig.Addr, conf)
	if err != nil {
		log.Fatal("create kafka async producer failed: ", zap.Error(err))
	}
	// Drain the producer's error channel forever; without a reader the
	// async producer would block.
	go func() {
		for err := range kafkaClient.Errors() {
			log.Error("send msg to kafka failed: ", zap.Error(err))
		}
	}()
	k.kafkaClient = kafkaClient
}
// Publish routes a broker event to the kafka topics configured for its
// action. Publish events additionally fan out to every DeliverMap entry
// whose MQTT filter matches the message topic. Unknown actions are an
// error; the client ID is used as the kafka partition key.
func (k *kafka) Publish(e *Elements) error {
	cfg := k.kafakConfig
	targets := make(map[string]bool)
	// add records a target topic, skipping unset ("") config entries.
	add := func(topic string) {
		if topic != "" {
			targets[topic] = true
		}
	}

	switch e.Action {
	case Connect:
		add(cfg.ConnectTopic)
	case Publish:
		add(cfg.PublishTopic)
		// DeliverMap entries are added as configured, without the
		// empty-string filter applied to the fixed event topics.
		for pattern, topic := range cfg.DeliverMap {
			if matchTopic(pattern, e.Topic) {
				targets[topic] = true
			}
		}
	case Subscribe:
		add(cfg.SubscribeTopic)
	case Unsubscribe:
		add(cfg.UnsubscribeTopic)
	case Disconnect:
		add(cfg.DisconnectTopic)
	default:
		return errors.New("error action: " + e.Action)
	}

	return k.publish(targets, e.ClientID, e)
}
// publish marshals msg once and queues it on the async producer for every
// topic in topics, using key as the kafka partition key. A topic that
// cannot be queued within five seconds aborts with an error; topics
// already queued are not rolled back.
func (k *kafka) publish(topics map[string]bool, key string, msg *Elements) error {
	payload, err := json.Marshal(msg)
	if err != nil {
		return err
	}
	for topic := range topics { // was `for topic, _ :=` — blank value is redundant
		select {
		case k.kafkaClient.Input() <- &sarama.ProducerMessage{
			Topic: topic,
			Key:   sarama.ByteEncoder(key),
			Value: sarama.ByteEncoder(payload),
		}:
			// queued; move on to the next topic
		case <-time.After(5 * time.Second):
			return errors.New("write kafka timeout")
		}
	}
	return nil
}
// match reports whether the split subscription filter subTopic matches
// the split topic. "#" matches the remainder of the topic (including an
// empty remainder) and "+" matches exactly one level; all other levels
// must be equal. Implemented iteratively, consuming one level per pass.
func match(subTopic []string, topic []string) bool {
	for {
		if len(subTopic) == 0 {
			// Filter exhausted: match only if the topic is too.
			return len(topic) == 0
		}
		if subTopic[0] == "#" {
			return true
		}
		if len(topic) == 0 {
			// Topic exhausted but filter still has non-"#" levels.
			return false
		}
		if subTopic[0] != "+" && subTopic[0] != topic[0] {
			return false
		}
		subTopic, topic = subTopic[1:], topic[1:]
	}
}
// matchTopic reports whether the MQTT filter subTopic ("/"-separated,
// with "+"/"#" wildcards) matches topic.
func matchTopic(subTopic string, topic string) bool {
	filterLevels := strings.Split(subTopic, "/")
	topicLevels := strings.Split(topic, "/")
	return match(filterLevels, topicLevels)
}

View File

@@ -0,0 +1,14 @@
{
"addr": [
"127.0.0.1:9090"
],
"onConnect": "onConnect",
"onPublish": "onPublish",
"onSubscribe": "onSubscribe",
"onDisconnect": "onDisconnect",
"onUnsubscribe": "onUnsubscribe",
"deliverMap": {
"#": "publish",
"/upload/+/#": "upload"
}
}

7
plugins/bridge/mock.go Normal file
View File

@@ -0,0 +1,7 @@
package bridge
// mockMQ is the no-op bridge used when no real backend is configured.
type mockMQ struct{}

// Publish discards the event and reports success.
func (m *mockMQ) Publish(e *Elements) error {
	return nil
}

58
pool/fixpool.go Normal file
View File

@@ -0,0 +1,58 @@
package pool
import (
"github.com/segmentio/fasthash/fnv1a"
)
// WorkerPool runs tasks on a fixed set of workers. Tasks submitted with
// the same uid hash to the same worker queue, so they execute in
// submission order relative to each other.
type WorkerPool struct {
	maxWorkers  int           // number of workers; fixed at construction
	taskQueue   []chan func() // one queue per worker, filled by dispatch
	stoppedChan chan struct{} // created by New; no code in this file ever closes it
}
// New builds a WorkerPool with maxWorkers workers (at least one) and
// starts them immediately.
func New(maxWorkers int) *WorkerPool {
	// There must be at least one worker.
	if maxWorkers < 1 {
		maxWorkers = 1
	}
	// Each slot of taskQueue receives a buffered channel in dispatch
	// (the old comment claiming unbuffered queues was wrong).
	pool := &WorkerPool{
		taskQueue:   make([]chan func(), maxWorkers),
		maxWorkers:  maxWorkers,
		stoppedChan: make(chan struct{}),
	}
	// Start the task dispatcher.
	pool.dispatch()
	return pool
}
// Submit enqueues task on the worker chosen by hashing uid, so tasks
// sharing a uid run sequentially on one worker. A nil task is ignored
// (checked before hashing — previously the hash was computed even for
// nil tasks). Blocks when the chosen worker's queue is full.
func (p *WorkerPool) Submit(uid string, task func()) {
	if task == nil {
		return
	}
	idx := fnv1a.HashString64(uid) % uint64(p.maxWorkers)
	p.taskQueue[idx] <- task
}
// dispatch creates one buffered task channel per worker slot and starts
// a worker goroutine draining each.
func (p *WorkerPool) dispatch() {
	for i := range p.taskQueue {
		ch := make(chan func(), 1024)
		p.taskQueue[i] = ch
		go startWorker(ch)
	}
}
// startWorker launches a goroutine that executes tasks received from
// taskChan, one at a time, until the channel is closed.
func startWorker(taskChan chan func()) {
	go func() {
		for task := range taskChan {
			task()
		}
	}()
}

166
pool/pool.go Normal file
View File

@@ -0,0 +1,166 @@
package pool
// import "time"
// const (
// // This value is the size of the queue that workers register their
// // availability to the dispatcher. There may be hundreds of workers, but
// // only a small channel is needed to register some of the workers.
// readyQueueSize = 64
// // If worker pool receives no new work for this period of time, then stop
// // a worker goroutine.
// idleTimeoutSec = 5
// )
// type WorkerPool struct {
// maxWorkers int
// timeout time.Duration
// taskQueue chan func()
// readyWorkers chan chan func()
// stoppedChan chan struct{}
// }
// func New(maxWorkers int) *WorkerPool {
// // There must be at least one worker.
// if maxWorkers < 1 {
// maxWorkers = 1
// }
// // taskQueue is unbuffered since items are always removed immediately.
// pool := &WorkerPool{
// taskQueue: make(chan func()),
// maxWorkers: maxWorkers,
// readyWorkers: make(chan chan func(), readyQueueSize),
// timeout: time.Second * idleTimeoutSec,
// stoppedChan: make(chan struct{}),
// }
// // Start the task dispatcher.
// go pool.dispatch()
// return pool
// }
// func (p *WorkerPool) Stop() {
// if p.Stopped() {
// return
// }
// close(p.taskQueue)
// <-p.stoppedChan
// }
// func (p *WorkerPool) Stopped() bool {
// select {
// case <-p.stoppedChan:
// return true
// default:
// }
// return false
// }
// func (p *WorkerPool) Submit(task func()) {
// if task != nil {
// p.taskQueue <- task
// }
// }
// func (p *WorkerPool) SubmitWait(task func()) {
// if task == nil {
// return
// }
// doneChan := make(chan struct{})
// p.taskQueue <- func() {
// task()
// close(doneChan)
// }
// <-doneChan
// }
// func (p *WorkerPool) dispatch() {
// defer close(p.stoppedChan)
// timeout := time.NewTimer(p.timeout)
// var workerCount int
// var task func()
// var ok bool
// var workerTaskChan chan func()
// startReady := make(chan chan func())
// Loop:
// for {
// timeout.Reset(p.timeout)
// select {
// case task, ok = <-p.taskQueue:
// if !ok {
// break Loop
// }
// // Got a task to do.
// select {
// case workerTaskChan = <-p.readyWorkers:
// // A worker is ready, so give task to worker.
// workerTaskChan <- task
// default:
// // No workers ready.
// // Create a new worker, if not at max.
// if workerCount < p.maxWorkers {
// workerCount++
// go func(t func()) {
// startWorker(startReady, p.readyWorkers)
// // Submit the task when the new worker.
// taskChan := <-startReady
// taskChan <- t
// }(task)
// } else {
// // Start a goroutine to submit the task when an existing
// // worker is ready.
// go func(t func()) {
// taskChan := <-p.readyWorkers
// taskChan <- t
// }(task)
// }
// }
// case <-timeout.C:
// // Timed out waiting for work to arrive. Kill a ready worker.
// if workerCount > 0 {
// select {
// case workerTaskChan = <-p.readyWorkers:
// // A worker is ready, so kill.
// close(workerTaskChan)
// workerCount--
// default:
// // No work, but no ready workers. All workers are busy.
// }
// }
// }
// }
// // Stop all remaining workers as they become ready.
// for workerCount > 0 {
// workerTaskChan = <-p.readyWorkers
// close(workerTaskChan)
// workerCount--
// }
// }
// func startWorker(startReady, readyWorkers chan chan func()) {
// go func() {
// taskChan := make(chan func())
// var task func()
// var ok bool
// // Register availability on startReady channel.
// startReady <- taskChan
// for {
// // Read task from dispatcher.
// task, ok = <-taskChan
// if !ok {
// // Dispatcher has told worker to stop.
// break
// }
// // Execute the task.
// task()
// // Register availability on readyWorkers channel.
// readyWorkers <- taskChan
// }
// }()
// }