mirror of
https://github.com/fhmq/hmq.git
synced 2026-05-02 14:28:34 +00:00
Compare commits
52 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e940c551e3 | ||
|
|
8eaec45cd8 | ||
|
|
a75adfdba4 | ||
|
|
fcfba55567 | ||
|
|
87aaf9bd60 | ||
|
|
8513477998 | ||
|
|
551cbd839a | ||
|
|
d75c79a160 | ||
|
|
d5bf973f53 | ||
|
|
84e7fe2490 | ||
|
|
684584b208 | ||
|
|
56fb4a2d54 | ||
|
|
5ed4728575 | ||
|
|
c0fea6a5ba | ||
|
|
47500910e1 | ||
|
|
0ff20b6ee2 | ||
|
|
7155667f6c | ||
|
|
83db82cdcc | ||
|
|
b3653bcfb1 | ||
|
|
221d00480e | ||
|
|
91733bf91e | ||
|
|
ef252550dc | ||
|
|
1058256235 | ||
|
|
5a569f14a3 | ||
|
|
93b21777ff | ||
|
|
dcf2934e1b | ||
|
|
d9e6e216b0 | ||
|
|
ca3951769a | ||
|
|
0439e7ce90 | ||
|
|
dc0f2185ab | ||
|
|
7462afcfb5 | ||
|
|
114e6f901e | ||
|
|
0cb51bd37a | ||
|
|
819b4725f2 | ||
|
|
85bdeccbfc | ||
|
|
1339a04b28 | ||
|
|
957329d85c | ||
|
|
7db7edaa17 | ||
|
|
1d6f6a4a71 | ||
|
|
123bb7210f | ||
|
|
9ad6590e83 | ||
|
|
516db49db5 | ||
|
|
a260057bfe | ||
|
|
bdd802ebfb | ||
|
|
5786e69b01 | ||
|
|
6a89b627d4 | ||
|
|
208a7cf0a8 | ||
|
|
a7fb7f1912 | ||
|
|
eeab0c6b7d | ||
|
|
4646042b7f | ||
|
|
49385e52fd | ||
|
|
3ed8625bb9 |
1
.gitignore
vendored
1
.gitignore
vendored
@@ -1,3 +1,4 @@
|
|||||||
hmq
|
hmq
|
||||||
log
|
log
|
||||||
log/*
|
log/*
|
||||||
|
*.test
|
||||||
@@ -1,6 +1,5 @@
|
|||||||
FROM alpine
|
FROM alpine
|
||||||
COPY hmq /
|
COPY hmq /
|
||||||
COPY hmq.config /
|
|
||||||
COPY ssl /ssl
|
COPY ssl /ssl
|
||||||
COPY conf /conf
|
COPY conf /conf
|
||||||
|
|
||||||
|
|||||||
201
LICENSE
Normal file
201
LICENSE
Normal file
@@ -0,0 +1,201 @@
|
|||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright {yyyy} {name of copyright owner}
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
70
README.md
70
README.md
@@ -3,16 +3,42 @@ Free and High Performance MQTT Broker
|
|||||||
|
|
||||||
## About
|
## About
|
||||||
Golang MQTT Broker, Version 3.1.1, and Compatible
|
Golang MQTT Broker, Version 3.1.1, and Compatible
|
||||||
for [eclipse paho client](https://github.com/eclipse?utf8=%E2%9C%93&q=mqtt&type=&language=)
|
for [eclipse paho client](https://github.com/eclipse?utf8=%E2%9C%93&q=mqtt&type=&language=) and mosquitto-client
|
||||||
|
|
||||||
|
Download: [click here](https://github.com/fhmq/hmq/releases)
|
||||||
|
|
||||||
## RUNNING
|
## RUNNING
|
||||||
```bash
|
```bash
|
||||||
$ git clone https://github.com/fhmq/hmq.git
|
$ go get github.com/fhmq/hmq
|
||||||
$ cd hmq
|
$ cd $GOPATH/github.com/fhmq/hmq
|
||||||
$ go run main.go
|
$ go run main.go
|
||||||
```
|
```
|
||||||
|
|
||||||
### broker.config
|
## Usage of hmq:
|
||||||
|
~~~
|
||||||
|
Usage: hmq [options]
|
||||||
|
|
||||||
|
Broker Options:
|
||||||
|
-w, --worker <number> Worker num to process message, perfer (client num)/10. (default 1024)
|
||||||
|
-p, --port <port> Use port for clients (default: 1883)
|
||||||
|
--host <host> Network host to listen on. (default "0.0.0.0")
|
||||||
|
-ws, --wsport <port> Use port for websocket monitoring
|
||||||
|
-wsp,--wspath <path> Use path for websocket monitoring
|
||||||
|
-c, --config <file> Configuration file
|
||||||
|
|
||||||
|
Logging Options:
|
||||||
|
-d, --debug <bool> Enable debugging output (default false)
|
||||||
|
-D Debug enabled
|
||||||
|
|
||||||
|
Cluster Options:
|
||||||
|
-r, --router <rurl> Router who maintenance cluster info
|
||||||
|
-cp, --clusterport <cluster-port> Cluster listen port for others
|
||||||
|
|
||||||
|
Common Options:
|
||||||
|
-h, --help Show this message
|
||||||
|
~~~
|
||||||
|
|
||||||
|
### hmq.config
|
||||||
~~~
|
~~~
|
||||||
{
|
{
|
||||||
"workerNum": 4096,
|
"workerNum": 4096,
|
||||||
@@ -20,9 +46,9 @@ $ go run main.go
|
|||||||
"host": "0.0.0.0",
|
"host": "0.0.0.0",
|
||||||
"cluster": {
|
"cluster": {
|
||||||
"host": "0.0.0.0",
|
"host": "0.0.0.0",
|
||||||
"port": "1993",
|
"port": "1993"
|
||||||
"routers": ["10.10.0.11:1993","10.10.0.12:1993"]
|
|
||||||
},
|
},
|
||||||
|
"router": "127.0.0.1:9888",
|
||||||
"wsPort": "1888",
|
"wsPort": "1888",
|
||||||
"wsPath": "/ws",
|
"wsPath": "/ws",
|
||||||
"wsTLS": true,
|
"wsTLS": true,
|
||||||
@@ -51,20 +77,21 @@ $ go run main.go
|
|||||||
|
|
||||||
* Supports will messages
|
* Supports will messages
|
||||||
|
|
||||||
* Queue subscribe
|
|
||||||
|
|
||||||
* Websocket Support
|
* Websocket Support
|
||||||
|
|
||||||
* TLS/SSL Support
|
* TLS/SSL Support
|
||||||
|
|
||||||
* Flexible ACL
|
* Flexible ACL
|
||||||
|
|
||||||
### QUEUE SUBSCRIBE
|
### Cluster
|
||||||
~~~
|
```bash
|
||||||
| Prefix | Examples |
|
1, start router for hmq (https://github.com/fhmq/router.git)
|
||||||
| ------------- |---------------------------------|
|
$ go get github.com/fhmq/router
|
||||||
| $queue/ | mosquitto_sub -t ‘$queue/topic’ |
|
$ cd $GOPATH/github.com/fhmq/router
|
||||||
~~~
|
$ go run main.go
|
||||||
|
2, config router in hmq.config ("router": "127.0.0.1:9888")
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
### ACL Configure
|
### ACL Configure
|
||||||
#### The ACL rules define:
|
#### The ACL rules define:
|
||||||
@@ -118,6 +145,14 @@ Client -> | Rule1 | --nomatch--> | Rule2 | --nomatch--> | Rule3 | -->
|
|||||||
allow | deny allow | deny allow | deny
|
allow | deny allow | deny allow | deny
|
||||||
~~~
|
~~~
|
||||||
|
|
||||||
|
### Online/Offline Notification
|
||||||
|
```bash
|
||||||
|
topic:
|
||||||
|
$SYS/broker/connection/clients/<clientID>
|
||||||
|
payload:
|
||||||
|
{"clientID":"client001","online":true/false,"timestamp":"2018-10-25T09:32:32Z"}
|
||||||
|
```
|
||||||
|
|
||||||
## Performance
|
## Performance
|
||||||
|
|
||||||
* High throughput
|
* High throughput
|
||||||
@@ -129,4 +164,9 @@ Client -> | Rule1 | --nomatch--> | Rule2 | --nomatch--> | Rule3 | -->
|
|||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
* Apache License Version 2.0
|
* Apache License Version 2.0
|
||||||
|
|
||||||
|
|
||||||
|
## Reference
|
||||||
|
|
||||||
|
* Surgermq.(https://github.com/surgemq/surgemq)
|
||||||
@@ -1 +0,0 @@
|
|||||||
theme: jekyll-theme-slate
|
|
||||||
@@ -1,11 +1,12 @@
|
|||||||
|
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
|
||||||
|
*/
|
||||||
package broker
|
package broker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"hmq/lib/acl"
|
"github.com/fhmq/hmq/lib/acl"
|
||||||
"strings"
|
|
||||||
|
|
||||||
log "github.com/cihub/seelog"
|
|
||||||
"github.com/fsnotify/fsnotify"
|
"github.com/fsnotify/fsnotify"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -40,10 +41,10 @@ func (b *Broker) handleFsEvent(event fsnotify.Event) error {
|
|||||||
case b.config.AclConf:
|
case b.config.AclConf:
|
||||||
if event.Op&fsnotify.Write == fsnotify.Write ||
|
if event.Op&fsnotify.Write == fsnotify.Write ||
|
||||||
event.Op&fsnotify.Create == fsnotify.Create {
|
event.Op&fsnotify.Create == fsnotify.Create {
|
||||||
log.Info("text:handling acl config change event:", event)
|
log.Info("text:handling acl config change event:", zap.String("filename", event.Name))
|
||||||
aclconfig, err := acl.AclConfigLoad(event.Name)
|
aclconfig, err := acl.AclConfigLoad(event.Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("aclconfig change failed, load acl conf error: ", err)
|
log.Error("aclconfig change failed, load acl conf error: ", zap.Error(err))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
b.AclConfig = aclconfig
|
b.AclConfig = aclconfig
|
||||||
@@ -56,14 +57,14 @@ func (b *Broker) StartAclWatcher() {
|
|||||||
go func() {
|
go func() {
|
||||||
wch, e := fsnotify.NewWatcher()
|
wch, e := fsnotify.NewWatcher()
|
||||||
if e != nil {
|
if e != nil {
|
||||||
log.Error("start monitor acl config file error,", e)
|
log.Error("start monitor acl config file error,", zap.Error(e))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
defer wch.Close()
|
defer wch.Close()
|
||||||
|
|
||||||
for _, i := range watchList {
|
for _, i := range watchList {
|
||||||
if err := wch.Add(i); err != nil {
|
if err := wch.Add(i); err != nil {
|
||||||
log.Error("start monitor acl config file error,", err)
|
log.Error("start monitor acl config file error,", zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -73,7 +74,7 @@ func (b *Broker) StartAclWatcher() {
|
|||||||
case evt := <-wch.Events:
|
case evt := <-wch.Events:
|
||||||
b.handleFsEvent(evt)
|
b.handleFsEvent(evt)
|
||||||
case err := <-wch.Errors:
|
case err := <-wch.Errors:
|
||||||
log.Error("error:", err.Error())
|
log.Error("error:", zap.Error(err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|||||||
416
broker/broker.go
416
broker/broker.go
@@ -1,8 +1,10 @@
|
|||||||
|
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
|
||||||
|
*/
|
||||||
package broker
|
package broker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"hmq/lib/acl"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"runtime/debug"
|
"runtime/debug"
|
||||||
@@ -11,39 +13,80 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||||
|
"github.com/fhmq/hmq/lib/acl"
|
||||||
|
"github.com/fhmq/hmq/lib/sessions"
|
||||||
|
"github.com/fhmq/hmq/lib/topics"
|
||||||
|
"github.com/fhmq/hmq/pool"
|
||||||
"github.com/shirou/gopsutil/mem"
|
"github.com/shirou/gopsutil/mem"
|
||||||
|
"go.uber.org/zap"
|
||||||
"golang.org/x/net/websocket"
|
"golang.org/x/net/websocket"
|
||||||
|
|
||||||
log "github.com/cihub/seelog"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
MessagePoolNum = 1024
|
||||||
|
MessagePoolMessageNum = 1024
|
||||||
|
)
|
||||||
|
|
||||||
|
type Message struct {
|
||||||
|
client *client
|
||||||
|
packet packets.ControlPacket
|
||||||
|
}
|
||||||
|
|
||||||
type Broker struct {
|
type Broker struct {
|
||||||
id string
|
id string
|
||||||
cid uint64
|
cid uint64
|
||||||
config *Config
|
mu sync.Mutex
|
||||||
tlsConfig *tls.Config
|
config *Config
|
||||||
AclConfig *acl.ACLConfig
|
tlsConfig *tls.Config
|
||||||
clients sync.Map
|
AclConfig *acl.ACLConfig
|
||||||
routes sync.Map
|
wpool *pool.WorkerPool
|
||||||
remotes sync.Map
|
clients sync.Map
|
||||||
sl *Sublist
|
routes sync.Map
|
||||||
rl *RetainList
|
remotes sync.Map
|
||||||
queues map[string]int
|
nodes map[string]interface{}
|
||||||
|
clusterPool chan *Message
|
||||||
|
queues map[string]int
|
||||||
|
topicsMgr *topics.Manager
|
||||||
|
sessionMgr *sessions.Manager
|
||||||
|
// messagePool []chan *Message
|
||||||
|
}
|
||||||
|
|
||||||
|
func newMessagePool() []chan *Message {
|
||||||
|
pool := make([]chan *Message, 0)
|
||||||
|
for i := 0; i < MessagePoolNum; i++ {
|
||||||
|
ch := make(chan *Message, MessagePoolMessageNum)
|
||||||
|
pool = append(pool, ch)
|
||||||
|
}
|
||||||
|
return pool
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewBroker(config *Config) (*Broker, error) {
|
func NewBroker(config *Config) (*Broker, error) {
|
||||||
b := &Broker{
|
b := &Broker{
|
||||||
id: GenUniqueId(),
|
id: GenUniqueId(),
|
||||||
config: config,
|
config: config,
|
||||||
sl: NewSublist(),
|
wpool: pool.New(config.Worker),
|
||||||
rl: NewRetainList(),
|
nodes: make(map[string]interface{}),
|
||||||
queues: make(map[string]int),
|
queues: make(map[string]int),
|
||||||
|
clusterPool: make(chan *Message),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
b.topicsMgr, err = topics.NewManager("mem")
|
||||||
|
if err != nil {
|
||||||
|
log.Error("new topic manager error", zap.Error(err))
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
b.sessionMgr, err = sessions.NewManager("mem")
|
||||||
|
if err != nil {
|
||||||
|
log.Error("new session manager error", zap.Error(err))
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
if b.config.TlsPort != "" {
|
if b.config.TlsPort != "" {
|
||||||
tlsconfig, err := NewTLSConfig(b.config.TlsInfo)
|
tlsconfig, err := NewTLSConfig(b.config.TlsInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("new tlsConfig error: ", err)
|
log.Error("new tlsConfig error", zap.Error(err))
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
b.tlsConfig = tlsconfig
|
b.tlsConfig = tlsconfig
|
||||||
@@ -51,7 +94,7 @@ func NewBroker(config *Config) (*Broker, error) {
|
|||||||
if b.config.Acl {
|
if b.config.Acl {
|
||||||
aclconfig, err := acl.AclConfigLoad(b.config.AclConf)
|
aclconfig, err := acl.AclConfigLoad(b.config.AclConf)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Load acl conf error: ", err)
|
log.Error("Load acl conf error", zap.Error(err))
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
b.AclConfig = aclconfig
|
b.AclConfig = aclconfig
|
||||||
@@ -60,40 +103,67 @@ func NewBroker(config *Config) (*Broker, error) {
|
|||||||
return b, nil
|
return b, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (b *Broker) SubmitWork(msg *Message) {
|
||||||
|
if b.wpool == nil {
|
||||||
|
b.wpool = pool.New(b.config.Worker)
|
||||||
|
}
|
||||||
|
|
||||||
|
if msg.client.typ == CLUSTER {
|
||||||
|
b.clusterPool <- msg
|
||||||
|
} else {
|
||||||
|
b.wpool.Submit(func() {
|
||||||
|
ProcessMessage(msg)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
func (b *Broker) Start() {
|
func (b *Broker) Start() {
|
||||||
if b == nil {
|
if b == nil {
|
||||||
log.Error("broker is null")
|
log.Error("broker is null")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//listen clinet over tcp
|
||||||
if b.config.Port != "" {
|
if b.config.Port != "" {
|
||||||
go b.StartClientListening(false)
|
go b.StartClientListening(false)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//listen for cluster
|
||||||
if b.config.Cluster.Port != "" {
|
if b.config.Cluster.Port != "" {
|
||||||
go b.StartClusterListening()
|
go b.StartClusterListening()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//listen for websocket
|
||||||
if b.config.WsPort != "" {
|
if b.config.WsPort != "" {
|
||||||
go b.StartWebsocketListening()
|
go b.StartWebsocketListening()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//listen client over tls
|
||||||
if b.config.TlsPort != "" {
|
if b.config.TlsPort != "" {
|
||||||
go b.StartClientListening(true)
|
go b.StartClientListening(true)
|
||||||
}
|
}
|
||||||
if len(b.config.Cluster.Routes) > 0 {
|
|
||||||
b.ConnectToRouters()
|
//connect on other node in cluster
|
||||||
|
if b.config.Router != "" {
|
||||||
|
go b.processClusterInfo()
|
||||||
|
b.ConnectToDiscovery()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
//system monitor
|
||||||
go StateMonitor()
|
go StateMonitor()
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func StateMonitor() {
|
func StateMonitor() {
|
||||||
v, _ := mem.VirtualMemory()
|
v, _ := mem.VirtualMemory()
|
||||||
timeSticker := time.NewTicker(time.Second * 5)
|
timeSticker := time.NewTicker(time.Second * 30)
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-timeSticker.C:
|
case <-timeSticker.C:
|
||||||
if v.UsedPercent > 0.8 {
|
if v.UsedPercent > 75 {
|
||||||
debug.FreeOSMemory()
|
debug.FreeOSMemory()
|
||||||
}
|
}
|
||||||
// fmt.Printf("Total: %v, Free:%v, UsedPercent:%f%%\n", v.Total, v.Free, v.UsedPercent)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -101,7 +171,7 @@ func StateMonitor() {
|
|||||||
func (b *Broker) StartWebsocketListening() {
|
func (b *Broker) StartWebsocketListening() {
|
||||||
path := b.config.WsPath
|
path := b.config.WsPath
|
||||||
hp := ":" + b.config.WsPort
|
hp := ":" + b.config.WsPort
|
||||||
log.Info("Start Webscoker Listening on ", hp, path)
|
log.Info("Start Websocket Listener on:", zap.String("hp", hp), zap.String("path", path))
|
||||||
http.Handle(path, websocket.Handler(b.wsHandler))
|
http.Handle(path, websocket.Handler(b.wsHandler))
|
||||||
var err error
|
var err error
|
||||||
if b.config.WsTLS {
|
if b.config.WsTLS {
|
||||||
@@ -110,7 +180,7 @@ func (b *Broker) StartWebsocketListening() {
|
|||||||
err = http.ListenAndServe(hp, nil)
|
err = http.ListenAndServe(hp, nil)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("ListenAndServe: " + err.Error())
|
log.Error("ListenAndServe:" + err.Error())
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -119,7 +189,7 @@ func (b *Broker) wsHandler(ws *websocket.Conn) {
|
|||||||
// io.Copy(ws, ws)
|
// io.Copy(ws, ws)
|
||||||
atomic.AddUint64(&b.cid, 1)
|
atomic.AddUint64(&b.cid, 1)
|
||||||
ws.PayloadType = websocket.BinaryFrame
|
ws.PayloadType = websocket.BinaryFrame
|
||||||
b.handleConnection(CLIENT, ws, b.cid)
|
b.handleConnection(CLIENT, ws)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *Broker) StartClientListening(Tls bool) {
|
func (b *Broker) StartClientListening(Tls bool) {
|
||||||
@@ -129,14 +199,14 @@ func (b *Broker) StartClientListening(Tls bool) {
|
|||||||
if Tls {
|
if Tls {
|
||||||
hp = b.config.TlsHost + ":" + b.config.TlsPort
|
hp = b.config.TlsHost + ":" + b.config.TlsPort
|
||||||
l, err = tls.Listen("tcp", hp, b.tlsConfig)
|
l, err = tls.Listen("tcp", hp, b.tlsConfig)
|
||||||
log.Info("Start TLS Listening client on ", hp)
|
log.Info("Start TLS Listening client on ", zap.String("hp", hp))
|
||||||
} else {
|
} else {
|
||||||
hp := b.config.Host + ":" + b.config.Port
|
hp := b.config.Host + ":" + b.config.Port
|
||||||
l, err = net.Listen("tcp", hp)
|
l, err = net.Listen("tcp", hp)
|
||||||
log.Info("Start Listening client on ", hp)
|
log.Info("Start Listening client on ", zap.String("hp", hp))
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Error listening on ", err)
|
log.Error("Error listening on ", zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
tmpDelay := 10 * ACCEPT_MIN_SLEEP
|
tmpDelay := 10 * ACCEPT_MIN_SLEEP
|
||||||
@@ -145,20 +215,20 @@ func (b *Broker) StartClientListening(Tls bool) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
if ne, ok := err.(net.Error); ok && ne.Temporary() {
|
if ne, ok := err.(net.Error); ok && ne.Temporary() {
|
||||||
log.Error("Temporary Client Accept Error(%v), sleeping %dms",
|
log.Error("Temporary Client Accept Error(%v), sleeping %dms",
|
||||||
ne, tmpDelay/time.Millisecond)
|
zap.Error(ne), zap.Duration("sleeping", tmpDelay/time.Millisecond))
|
||||||
time.Sleep(tmpDelay)
|
time.Sleep(tmpDelay)
|
||||||
tmpDelay *= 2
|
tmpDelay *= 2
|
||||||
if tmpDelay > ACCEPT_MAX_SLEEP {
|
if tmpDelay > ACCEPT_MAX_SLEEP {
|
||||||
tmpDelay = ACCEPT_MAX_SLEEP
|
tmpDelay = ACCEPT_MAX_SLEEP
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
log.Error("Accept error: %v", err)
|
log.Error("Accept error: %v", zap.Error(err))
|
||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
tmpDelay = ACCEPT_MIN_SLEEP
|
tmpDelay = ACCEPT_MIN_SLEEP
|
||||||
atomic.AddUint64(&b.cid, 1)
|
atomic.AddUint64(&b.cid, 1)
|
||||||
go b.handleConnection(CLIENT, conn, b.cid)
|
go b.handleConnection(CLIENT, conn)
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -171,7 +241,7 @@ func (b *Broker) Handshake(conn net.Conn) bool {
|
|||||||
|
|
||||||
// Force handshake
|
// Force handshake
|
||||||
if err := nc.Handshake(); err != nil {
|
if err := nc.Handshake(); err != nil {
|
||||||
log.Error("TLS handshake error, ", err)
|
log.Error("TLS handshake error, ", zap.Error(err))
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
nc.SetReadDeadline(time.Time{})
|
nc.SetReadDeadline(time.Time{})
|
||||||
@@ -194,49 +264,42 @@ func TlsTimeout(conn *tls.Conn) {
|
|||||||
|
|
||||||
func (b *Broker) StartClusterListening() {
|
func (b *Broker) StartClusterListening() {
|
||||||
var hp string = b.config.Cluster.Host + ":" + b.config.Cluster.Port
|
var hp string = b.config.Cluster.Host + ":" + b.config.Cluster.Port
|
||||||
log.Info("Start Listening cluster on ", hp)
|
log.Info("Start Listening cluster on ", zap.String("hp", hp))
|
||||||
|
|
||||||
l, e := net.Listen("tcp", hp)
|
l, e := net.Listen("tcp", hp)
|
||||||
if e != nil {
|
if e != nil {
|
||||||
log.Error("Error listening on ", e)
|
log.Error("Error listening on ", zap.Error(e))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var idx uint64 = 0
|
|
||||||
tmpDelay := 10 * ACCEPT_MIN_SLEEP
|
tmpDelay := 10 * ACCEPT_MIN_SLEEP
|
||||||
for {
|
for {
|
||||||
conn, err := l.Accept()
|
conn, err := l.Accept()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if ne, ok := err.(net.Error); ok && ne.Temporary() {
|
if ne, ok := err.(net.Error); ok && ne.Temporary() {
|
||||||
log.Error("Temporary Client Accept Error(%v), sleeping %dms",
|
log.Error("Temporary Client Accept Error(%v), sleeping %dms",
|
||||||
ne, tmpDelay/time.Millisecond)
|
zap.Error(ne), zap.Duration("sleeping", tmpDelay/time.Millisecond))
|
||||||
time.Sleep(tmpDelay)
|
time.Sleep(tmpDelay)
|
||||||
tmpDelay *= 2
|
tmpDelay *= 2
|
||||||
if tmpDelay > ACCEPT_MAX_SLEEP {
|
if tmpDelay > ACCEPT_MAX_SLEEP {
|
||||||
tmpDelay = ACCEPT_MAX_SLEEP
|
tmpDelay = ACCEPT_MAX_SLEEP
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
log.Error("Accept error: %v", err)
|
log.Error("Accept error: %v", zap.Error(err))
|
||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
tmpDelay = ACCEPT_MIN_SLEEP
|
tmpDelay = ACCEPT_MIN_SLEEP
|
||||||
|
|
||||||
go b.handleConnection(ROUTER, conn, idx)
|
go b.handleConnection(ROUTER, conn)
|
||||||
if idx == 1 {
|
|
||||||
idx = 0
|
|
||||||
} else {
|
|
||||||
idx = idx + 1
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *Broker) handleConnection(typ int, conn net.Conn, idx uint64) {
|
func (b *Broker) handleConnection(typ int, conn net.Conn) {
|
||||||
//process connect packet
|
//process connect packet
|
||||||
packet, err := packets.ReadPacket(conn)
|
packet, err := packets.ReadPacket(conn)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("read connect packet error: ", err)
|
log.Error("read connect packet error: ", zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if packet == nil {
|
if packet == nil {
|
||||||
@@ -253,7 +316,7 @@ func (b *Broker) handleConnection(typ int, conn net.Conn, idx uint64) {
|
|||||||
connack.SessionPresent = msg.CleanSession
|
connack.SessionPresent = msg.CleanSession
|
||||||
err = connack.Write(conn)
|
err = connack.Write(conn)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("send connack error, ", err)
|
log.Error("send connack error, ", zap.Error(err), zap.String("clientID", msg.ClientIdentifier))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -281,75 +344,196 @@ func (b *Broker) handleConnection(typ int, conn net.Conn, idx uint64) {
|
|||||||
conn: conn,
|
conn: conn,
|
||||||
info: info,
|
info: info,
|
||||||
}
|
}
|
||||||
|
|
||||||
c.init()
|
c.init()
|
||||||
|
|
||||||
|
err = b.getSession(c, msg, connack)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("get session error: ", zap.String("clientID", c.info.clientID))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
cid := c.info.clientID
|
cid := c.info.clientID
|
||||||
|
|
||||||
var msgPool *MessagePool
|
|
||||||
var exist bool
|
var exist bool
|
||||||
var old interface{}
|
var old interface{}
|
||||||
|
|
||||||
switch typ {
|
switch typ {
|
||||||
case CLIENT:
|
case CLIENT:
|
||||||
msgPool = MSGPool[idx%MessagePoolNum].GetPool()
|
|
||||||
old, exist = b.clients.Load(cid)
|
old, exist = b.clients.Load(cid)
|
||||||
|
if exist {
|
||||||
|
log.Warn("client exist, close old...", zap.String("clientID", c.info.clientID))
|
||||||
|
ol, ok := old.(*client)
|
||||||
|
if ok {
|
||||||
|
ol.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
b.clients.Store(cid, c)
|
b.clients.Store(cid, c)
|
||||||
|
|
||||||
|
b.OnlineOfflineNotification(cid, true)
|
||||||
case ROUTER:
|
case ROUTER:
|
||||||
msgPool = MSGPool[(MessagePoolNum + idx)].GetPool()
|
|
||||||
old, exist = b.routes.Load(cid)
|
old, exist = b.routes.Load(cid)
|
||||||
|
if exist {
|
||||||
|
log.Warn("router exist, close old...")
|
||||||
|
ol, ok := old.(*client)
|
||||||
|
if ok {
|
||||||
|
ol.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
b.routes.Store(cid, c)
|
b.routes.Store(cid, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
if exist {
|
// mpool := b.messagePool[fnv1a.HashString64(cid)%MessagePoolNum]
|
||||||
log.Warn("client or routers exist, close old...")
|
|
||||||
ol, ok := old.(*client)
|
c.readLoop()
|
||||||
if ok {
|
|
||||||
ol.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
c.readLoop(msgPool)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *Broker) ConnectToRouters() {
|
func (b *Broker) ConnectToDiscovery() {
|
||||||
for i := 0; i < len(b.config.Cluster.Routes); i++ {
|
|
||||||
url := b.config.Cluster.Routes[i]
|
|
||||||
go b.connectRouter(url, "")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *Broker) connectRouter(url, remoteID string) {
|
|
||||||
var conn net.Conn
|
var conn net.Conn
|
||||||
var err error
|
var err error
|
||||||
|
var tempDelay time.Duration = 0
|
||||||
for {
|
for {
|
||||||
conn, err = net.Dial("tcp", url)
|
conn, err = net.Dial("tcp", b.config.Router)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Error trying to connect to route: ", err)
|
log.Error("Error trying to connect to route: ", zap.Error(err))
|
||||||
select {
|
log.Debug("Connect to route timeout ,retry...")
|
||||||
case <-time.After(DEFAULT_ROUTE_CONNECT):
|
|
||||||
log.Debug("Connect to route timeout ,retry...")
|
if 0 == tempDelay {
|
||||||
continue
|
tempDelay = 1 * time.Second
|
||||||
|
} else {
|
||||||
|
tempDelay *= 2
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if max := 20 * time.Second; tempDelay > max {
|
||||||
|
tempDelay = max
|
||||||
|
}
|
||||||
|
time.Sleep(tempDelay)
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
|
break
|
||||||
}
|
}
|
||||||
route := &route{
|
log.Debug("connect to router success :", zap.String("Router", b.config.Router))
|
||||||
remoteID: remoteID,
|
|
||||||
remoteUrl: url,
|
cid := b.id
|
||||||
|
info := info{
|
||||||
|
clientID: cid,
|
||||||
|
keepalive: 60,
|
||||||
|
}
|
||||||
|
|
||||||
|
c := &client{
|
||||||
|
typ: CLUSTER,
|
||||||
|
broker: b,
|
||||||
|
conn: conn,
|
||||||
|
info: info,
|
||||||
|
}
|
||||||
|
|
||||||
|
c.init()
|
||||||
|
|
||||||
|
c.SendConnect()
|
||||||
|
c.SendInfo()
|
||||||
|
|
||||||
|
go c.readLoop()
|
||||||
|
go c.StartPing()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Broker) processClusterInfo() {
|
||||||
|
for {
|
||||||
|
msg, ok := <-b.clusterPool
|
||||||
|
if !ok {
|
||||||
|
log.Error("read message from cluster channel error")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ProcessMessage(msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Broker) connectRouter(id, addr string) {
|
||||||
|
var conn net.Conn
|
||||||
|
var err error
|
||||||
|
var timeDelay time.Duration = 0
|
||||||
|
retryTimes := 0
|
||||||
|
max := 32 * time.Second
|
||||||
|
for {
|
||||||
|
|
||||||
|
if !b.checkNodeExist(id, addr) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
conn, err = net.Dial("tcp", addr)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Error trying to connect to route: ", zap.Error(err))
|
||||||
|
|
||||||
|
if retryTimes > 50 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("Connect to route timeout ,retry...")
|
||||||
|
|
||||||
|
if 0 == timeDelay {
|
||||||
|
timeDelay = 1 * time.Second
|
||||||
|
} else {
|
||||||
|
timeDelay *= 2
|
||||||
|
}
|
||||||
|
|
||||||
|
if timeDelay > max {
|
||||||
|
timeDelay = max
|
||||||
|
}
|
||||||
|
time.Sleep(timeDelay)
|
||||||
|
retryTimes++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
route := route{
|
||||||
|
remoteID: id,
|
||||||
|
remoteUrl: addr,
|
||||||
}
|
}
|
||||||
cid := GenUniqueId()
|
cid := GenUniqueId()
|
||||||
|
|
||||||
info := info{
|
info := info{
|
||||||
clientID: cid,
|
clientID: cid,
|
||||||
|
keepalive: 60,
|
||||||
}
|
}
|
||||||
|
|
||||||
c := &client{
|
c := &client{
|
||||||
typ: REMOTE,
|
broker: b,
|
||||||
conn: conn,
|
typ: REMOTE,
|
||||||
route: route,
|
conn: conn,
|
||||||
info: info,
|
route: route,
|
||||||
|
info: info,
|
||||||
}
|
}
|
||||||
c.init()
|
c.init()
|
||||||
b.remotes.Store(cid, c)
|
b.remotes.Store(cid, c)
|
||||||
|
|
||||||
c.SendConnect()
|
c.SendConnect()
|
||||||
c.SendInfo()
|
|
||||||
c.StartPing()
|
// mpool := b.messagePool[fnv1a.HashString64(cid)%MessagePoolNum]
|
||||||
|
go c.readLoop()
|
||||||
|
go c.StartPing()
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Broker) checkNodeExist(id, url string) bool {
|
||||||
|
if id == b.id {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, v := range b.nodes {
|
||||||
|
if k == id {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//skip
|
||||||
|
l, ok := v.(string)
|
||||||
|
if ok {
|
||||||
|
if url == l {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *Broker) CheckRemoteExist(remoteID, url string) bool {
|
func (b *Broker) CheckRemoteExist(remoteID, url string) bool {
|
||||||
@@ -358,9 +542,7 @@ func (b *Broker) CheckRemoteExist(remoteID, url string) bool {
|
|||||||
v, ok := value.(*client)
|
v, ok := value.(*client)
|
||||||
if ok {
|
if ok {
|
||||||
if v.route.remoteUrl == url {
|
if v.route.remoteUrl == url {
|
||||||
// if v.route.remoteID == "" || v.route.remoteID != remoteID {
|
|
||||||
v.route.remoteID = remoteID
|
v.route.remoteID = remoteID
|
||||||
// }
|
|
||||||
exist = true
|
exist = true
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@@ -375,22 +557,24 @@ func (b *Broker) SendLocalSubsToRouter(c *client) {
|
|||||||
b.clients.Range(func(key, value interface{}) bool {
|
b.clients.Range(func(key, value interface{}) bool {
|
||||||
client, ok := value.(*client)
|
client, ok := value.(*client)
|
||||||
if ok {
|
if ok {
|
||||||
subs := client.subs
|
subs := client.subMap
|
||||||
for _, sub := range subs {
|
for _, sub := range subs {
|
||||||
subInfo.Topics = append(subInfo.Topics, string(sub.topic))
|
subInfo.Topics = append(subInfo.Topics, sub.topic)
|
||||||
subInfo.Qoss = append(subInfo.Qoss, sub.qos)
|
subInfo.Qoss = append(subInfo.Qoss, sub.qos)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
})
|
})
|
||||||
err := c.WriterPacket(subInfo)
|
if len(subInfo.Topics) > 0 {
|
||||||
if err != nil {
|
err := c.WriterPacket(subInfo)
|
||||||
log.Error("Send localsubs To Router error :", err)
|
if err != nil {
|
||||||
|
log.Error("Send localsubs To Router error :", zap.Error(err))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *Broker) BroadcastInfoMessage(remoteID string, msg *packets.PublishPacket) {
|
func (b *Broker) BroadcastInfoMessage(remoteID string, msg *packets.PublishPacket) {
|
||||||
b.remotes.Range(func(key, value interface{}) bool {
|
b.routes.Range(func(key, value interface{}) bool {
|
||||||
r, ok := value.(*client)
|
r, ok := value.(*client)
|
||||||
if ok {
|
if ok {
|
||||||
if r.route.remoteID == remoteID {
|
if r.route.remoteID == remoteID {
|
||||||
@@ -405,7 +589,8 @@ func (b *Broker) BroadcastInfoMessage(remoteID string, msg *packets.PublishPacke
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (b *Broker) BroadcastSubOrUnsubMessage(packet packets.ControlPacket) {
|
func (b *Broker) BroadcastSubOrUnsubMessage(packet packets.ControlPacket) {
|
||||||
b.remotes.Range(func(key, value interface{}) bool {
|
|
||||||
|
b.routes.Range(func(key, value interface{}) bool {
|
||||||
r, ok := value.(*client)
|
r, ok := value.(*client)
|
||||||
if ok {
|
if ok {
|
||||||
r.WriterPacket(packet)
|
r.WriterPacket(packet)
|
||||||
@@ -430,18 +615,20 @@ func (b *Broker) removeClient(c *client) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (b *Broker) PublishMessage(packet *packets.PublishPacket) {
|
func (b *Broker) PublishMessage(packet *packets.PublishPacket) {
|
||||||
topic := packet.TopicName
|
var subs []interface{}
|
||||||
r := b.sl.Match(topic)
|
var qoss []byte
|
||||||
// log.Info("psubs num: ", len(r.psubs))
|
err := b.topicsMgr.Subscribers([]byte(packet.TopicName), packet.Qos, &subs, &qoss)
|
||||||
if len(r.psubs) == 0 {
|
if err != nil {
|
||||||
|
log.Error("search sub client error, ", zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, sub := range r.psubs {
|
for _, sub := range subs {
|
||||||
if sub != nil {
|
s, ok := sub.(*subscription)
|
||||||
err := sub.client.WriterPacket(packet)
|
if ok {
|
||||||
|
err := s.client.WriterPacket(packet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("process message for psub error, ", err)
|
log.Error("write message error, ", zap.Error(err))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -449,14 +636,21 @@ func (b *Broker) PublishMessage(packet *packets.PublishPacket) {
|
|||||||
|
|
||||||
func (b *Broker) BroadcastUnSubscribe(subs map[string]*subscription) {
|
func (b *Broker) BroadcastUnSubscribe(subs map[string]*subscription) {
|
||||||
|
|
||||||
ubsub := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket)
|
unsub := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket)
|
||||||
for topic, _ := range subs {
|
for topic, _ := range subs {
|
||||||
// topic := sub.topic
|
unsub.Topics = append(unsub.Topics, topic)
|
||||||
// if sub.queue {
|
|
||||||
// topic = "$queue/" + sub.topic
|
|
||||||
// }
|
|
||||||
ubsub.Topics = append(ubsub.Topics, topic)
|
|
||||||
}
|
}
|
||||||
b.BroadcastSubOrUnsubMessage(ubsub)
|
|
||||||
|
|
||||||
|
if len(unsub.Topics) > 0 {
|
||||||
|
b.BroadcastSubOrUnsubMessage(unsub)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Broker) OnlineOfflineNotification(clientID string, online bool) {
|
||||||
|
packet := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||||
|
packet.TopicName = "$SYS/broker/connection/clients/" + clientID
|
||||||
|
packet.Qos = 0
|
||||||
|
packet.Payload = []byte(fmt.Sprintf(`{"clientID":"%s","online":%v,"timestamp":"%s"}`, clientID, online, time.Now().UTC().Format(time.RFC3339)))
|
||||||
|
|
||||||
|
b.PublishMessage(packet)
|
||||||
}
|
}
|
||||||
|
|||||||
382
broker/client.go
382
broker/client.go
@@ -1,25 +1,32 @@
|
|||||||
|
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
|
||||||
|
*/
|
||||||
package broker
|
package broker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
"net"
|
"net"
|
||||||
|
"reflect"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||||
|
"github.com/fhmq/hmq/lib/sessions"
|
||||||
log "github.com/cihub/seelog"
|
"github.com/fhmq/hmq/lib/topics"
|
||||||
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// special pub topic for cluster info BrokerInfoTopic
|
// special pub topic for cluster info BrokerInfoTopic
|
||||||
BrokerInfoTopic = "broker001info/brokerinfo"
|
BrokerInfoTopic = "broker000100101info"
|
||||||
// CLIENT is an end user.
|
// CLIENT is an end user.
|
||||||
CLIENT = 0
|
CLIENT = 0
|
||||||
// ROUTER is another router in the cluster.
|
// ROUTER is another router in the cluster.
|
||||||
ROUTER = 1
|
ROUTER = 1
|
||||||
//REMOTE is the router connect to other cluster
|
//REMOTE is the router connect to other cluster
|
||||||
REMOTE = 2
|
REMOTE = 2
|
||||||
|
CLUSTER = 3
|
||||||
)
|
)
|
||||||
const (
|
const (
|
||||||
Connected = 1
|
Connected = 1
|
||||||
@@ -27,16 +34,21 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type client struct {
|
type client struct {
|
||||||
typ int
|
typ int
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
broker *Broker
|
broker *Broker
|
||||||
conn net.Conn
|
conn net.Conn
|
||||||
info info
|
info info
|
||||||
route *route
|
route route
|
||||||
status int
|
status int
|
||||||
smu sync.RWMutex
|
ctx context.Context
|
||||||
subs map[string]*subscription
|
cancelFunc context.CancelFunc
|
||||||
rsubs map[string]*subInfo
|
session *sessions.Session
|
||||||
|
subMap map[string]*subscription
|
||||||
|
topicsMgr *topics.Manager
|
||||||
|
subs []interface{}
|
||||||
|
qoss []byte
|
||||||
|
rmsgs []*packets.PublishPacket
|
||||||
}
|
}
|
||||||
|
|
||||||
type subInfo struct {
|
type subInfo struct {
|
||||||
@@ -71,51 +83,51 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func (c *client) init() {
|
func (c *client) init() {
|
||||||
c.smu.Lock()
|
|
||||||
defer c.smu.Unlock()
|
|
||||||
c.status = Connected
|
c.status = Connected
|
||||||
typ := c.typ
|
|
||||||
if typ == ROUTER {
|
|
||||||
c.rsubs = make(map[string]*subInfo)
|
|
||||||
} else if typ == CLIENT {
|
|
||||||
c.subs = make(map[string]*subscription, 10)
|
|
||||||
}
|
|
||||||
c.info.localIP = strings.Split(c.conn.LocalAddr().String(), ":")[0]
|
c.info.localIP = strings.Split(c.conn.LocalAddr().String(), ":")[0]
|
||||||
c.info.remoteIP = strings.Split(c.conn.RemoteAddr().String(), ":")[0]
|
c.info.remoteIP = strings.Split(c.conn.RemoteAddr().String(), ":")[0]
|
||||||
|
c.ctx, c.cancelFunc = context.WithCancel(context.Background())
|
||||||
|
c.subMap = make(map[string]*subscription)
|
||||||
|
c.topicsMgr = c.broker.topicsMgr
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *client) readLoop(msgPool *MessagePool) {
|
func (c *client) readLoop() {
|
||||||
nc := c.conn
|
nc := c.conn
|
||||||
if nc == nil || msgPool == nil {
|
b := c.broker
|
||||||
|
if nc == nil || b == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
lastIn := uint16(time.Now().Unix())
|
keepAlive := time.Second * time.Duration(c.info.keepalive)
|
||||||
var nowTime uint16
|
timeOut := keepAlive + (keepAlive / 2)
|
||||||
|
|
||||||
for {
|
for {
|
||||||
nowTime = uint16(time.Now().Unix())
|
select {
|
||||||
if 0 != c.info.keepalive && nowTime-lastIn > c.info.keepalive*3/2 {
|
case <-c.ctx.Done():
|
||||||
log.Errorf("Client %s has exceeded timeout, disconnecting.\n", c.info.clientID)
|
|
||||||
msg := &Message{client: c, packet: DisconnectdPacket}
|
|
||||||
msgPool.queue <- msg
|
|
||||||
return
|
return
|
||||||
|
default:
|
||||||
|
//add read timeout
|
||||||
|
if err := nc.SetReadDeadline(time.Now().Add(timeOut)); err != nil {
|
||||||
|
log.Error("set read timeout error: ", zap.Error(err), zap.String("ClientID", c.info.clientID))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
packet, err := packets.ReadPacket(nc)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("read packet error: ", zap.Error(err), zap.String("ClientID", c.info.clientID))
|
||||||
|
msg := &Message{client: c, packet: DisconnectdPacket}
|
||||||
|
b.SubmitWork(msg)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
msg := &Message{
|
||||||
|
client: c,
|
||||||
|
packet: packet,
|
||||||
|
}
|
||||||
|
b.SubmitWork(msg)
|
||||||
}
|
}
|
||||||
packet, err := packets.ReadPacket(nc)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("read packet error: ", err)
|
|
||||||
msg := &Message{client: c, packet: DisconnectdPacket}
|
|
||||||
msgPool.queue <- msg
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// log.Info("recv buf: ", packet)
|
|
||||||
lastIn = uint16(time.Now().Unix())
|
|
||||||
msg := &Message{
|
|
||||||
client: c,
|
|
||||||
packet: packet,
|
|
||||||
}
|
|
||||||
msgPool.queue <- msg
|
|
||||||
}
|
}
|
||||||
msgPool.Reduce()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func ProcessMessage(msg *Message) {
|
func ProcessMessage(msg *Message) {
|
||||||
@@ -124,45 +136,32 @@ func ProcessMessage(msg *Message) {
|
|||||||
if ca == nil {
|
if ca == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
log.Debug("Recv message:", zap.String("message type", reflect.TypeOf(msg.packet).String()[9:]), zap.String("ClientID", c.info.clientID))
|
||||||
switch ca.(type) {
|
switch ca.(type) {
|
||||||
case *packets.ConnackPacket:
|
case *packets.ConnackPacket:
|
||||||
// log.Info("Recv conack message..........")
|
|
||||||
case *packets.ConnectPacket:
|
case *packets.ConnectPacket:
|
||||||
// log.Info("Recv connect message..........")
|
|
||||||
case *packets.PublishPacket:
|
case *packets.PublishPacket:
|
||||||
// log.Info("Recv publish message..........")
|
|
||||||
packet := ca.(*packets.PublishPacket)
|
packet := ca.(*packets.PublishPacket)
|
||||||
c.ProcessPublish(packet)
|
c.ProcessPublish(packet)
|
||||||
case *packets.PubackPacket:
|
case *packets.PubackPacket:
|
||||||
//log.Info("Recv publish ack message..........")
|
|
||||||
case *packets.PubrecPacket:
|
case *packets.PubrecPacket:
|
||||||
//log.Info("Recv publish rec message..........")
|
|
||||||
case *packets.PubrelPacket:
|
case *packets.PubrelPacket:
|
||||||
//log.Info("Recv publish rel message..........")
|
|
||||||
case *packets.PubcompPacket:
|
case *packets.PubcompPacket:
|
||||||
//log.Info("Recv publish ack message..........")
|
|
||||||
case *packets.SubscribePacket:
|
case *packets.SubscribePacket:
|
||||||
// log.Info("Recv subscribe message.....")
|
|
||||||
packet := ca.(*packets.SubscribePacket)
|
packet := ca.(*packets.SubscribePacket)
|
||||||
c.ProcessSubscribe(packet)
|
c.ProcessSubscribe(packet)
|
||||||
case *packets.SubackPacket:
|
case *packets.SubackPacket:
|
||||||
// log.Info("Recv suback message.....")
|
|
||||||
case *packets.UnsubscribePacket:
|
case *packets.UnsubscribePacket:
|
||||||
// log.Info("Recv unsubscribe message.....")
|
|
||||||
packet := ca.(*packets.UnsubscribePacket)
|
packet := ca.(*packets.UnsubscribePacket)
|
||||||
c.ProcessUnSubscribe(packet)
|
c.ProcessUnSubscribe(packet)
|
||||||
case *packets.UnsubackPacket:
|
case *packets.UnsubackPacket:
|
||||||
//log.Info("Recv unsuback message.....")
|
|
||||||
case *packets.PingreqPacket:
|
case *packets.PingreqPacket:
|
||||||
// log.Info("Recv PINGREQ message..........")
|
|
||||||
c.ProcessPing()
|
c.ProcessPing()
|
||||||
case *packets.PingrespPacket:
|
case *packets.PingrespPacket:
|
||||||
//log.Info("Recv PINGRESP message..........")
|
|
||||||
case *packets.DisconnectPacket:
|
case *packets.DisconnectPacket:
|
||||||
// log.Info("Recv DISCONNECT message.......")
|
|
||||||
c.Close()
|
c.Close()
|
||||||
default:
|
default:
|
||||||
log.Info("Recv Unknow message.......")
|
log.Info("Recv Unknow message.......", zap.String("ClientID", c.info.clientID))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -172,8 +171,13 @@ func (c *client) ProcessPublish(packet *packets.PublishPacket) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
topic := packet.TopicName
|
topic := packet.TopicName
|
||||||
|
if topic == BrokerInfoTopic && c.typ == CLUSTER {
|
||||||
|
c.ProcessInfo(packet)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
if !c.CheckTopicAuth(PUB, topic) {
|
if !c.CheckTopicAuth(PUB, topic) {
|
||||||
log.Error("Pub Topics Auth failed, ", topic)
|
log.Error("Pub Topics Auth failed, ", zap.String("topic", topic), zap.String("ClientID", c.info.clientID))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -184,24 +188,16 @@ func (c *client) ProcessPublish(packet *packets.PublishPacket) {
|
|||||||
puback := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
|
puback := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
|
||||||
puback.MessageID = packet.MessageID
|
puback.MessageID = packet.MessageID
|
||||||
if err := c.WriterPacket(puback); err != nil {
|
if err := c.WriterPacket(puback); err != nil {
|
||||||
log.Error("send puback error, ", err)
|
log.Error("send puback error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
c.ProcessPublishMessage(packet)
|
c.ProcessPublishMessage(packet)
|
||||||
case QosExactlyOnce:
|
case QosExactlyOnce:
|
||||||
return
|
return
|
||||||
default:
|
default:
|
||||||
log.Error("publish with unknown qos")
|
log.Error("publish with unknown qos", zap.String("ClientID", c.info.clientID))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if packet.Retain {
|
|
||||||
if b := c.broker; b != nil {
|
|
||||||
err := b.rl.Insert(topic, packet)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("Insert Retain Message error: ", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -215,81 +211,40 @@ func (c *client) ProcessPublishMessage(packet *packets.PublishPacket) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
typ := c.typ
|
typ := c.typ
|
||||||
topic := packet.TopicName
|
|
||||||
|
|
||||||
r := b.sl.Match(topic)
|
if packet.Retain {
|
||||||
// log.Info("psubs num: ", len(r.psubs))
|
if err := c.topicsMgr.Retain(packet); err != nil {
|
||||||
if len(r.qsubs) == 0 && len(r.psubs) == 0 {
|
log.Error("Error retaining message: ", zap.Error(err), zap.String("ClientID", c.info.clientID))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err := c.topicsMgr.Subscribers([]byte(packet.TopicName), packet.Qos, &c.subs, &c.qoss)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Error retrieving subscribers list: ", zap.String("ClientID", c.info.clientID))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, sub := range r.psubs {
|
// log.Info("psubs num: ", len(r.psubs))
|
||||||
if sub.client.typ == ROUTER {
|
if len(c.subs) == 0 {
|
||||||
if typ == ROUTER {
|
return
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if sub != nil {
|
|
||||||
err := sub.client.WriterPacket(packet)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("process message for psub error, ", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pre := -1
|
for _, sub := range c.subs {
|
||||||
now := -1
|
s, ok := sub.(*subscription)
|
||||||
t := "$queue/" + topic
|
if ok {
|
||||||
cnt, exist := b.queues[t]
|
if s.client.typ == ROUTER {
|
||||||
if exist {
|
if typ != CLIENT {
|
||||||
// log.Info("queue index : ", cnt)
|
|
||||||
for _, sub := range r.qsubs {
|
|
||||||
if sub.client.typ == ROUTER {
|
|
||||||
if c.typ == ROUTER {
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if c.typ == CLIENT {
|
err := s.client.WriterPacket(packet)
|
||||||
now = now + 1
|
if err != nil {
|
||||||
} else {
|
log.Error("process message for psub error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
|
||||||
now = now + sub.client.rsubs[t].num
|
|
||||||
}
|
}
|
||||||
if cnt > pre && cnt <= now {
|
|
||||||
if sub != nil {
|
|
||||||
err := sub.client.WriterPacket(packet)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("send publish error, ", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
break
|
|
||||||
}
|
|
||||||
pre = now
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
length := getQueueSubscribeNum(r.qsubs)
|
|
||||||
if length > 0 {
|
|
||||||
b.queues[t] = (b.queues[t] + 1) % length
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func getQueueSubscribeNum(qsubs []*subscription) int {
|
|
||||||
topic := "$queue/"
|
|
||||||
if len(qsubs) < 1 {
|
|
||||||
return 0
|
|
||||||
} else {
|
|
||||||
topic = topic + qsubs[0].topic
|
|
||||||
}
|
|
||||||
num := 0
|
|
||||||
for _, sub := range qsubs {
|
|
||||||
if sub.client.typ == CLIENT {
|
|
||||||
num = num + 1
|
|
||||||
} else {
|
|
||||||
num = num + sub.client.rsubs[topic].num
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return num
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *client) ProcessSubscribe(packet *packets.SubscribePacket) {
|
func (c *client) ProcessSubscribe(packet *packets.SubscribePacket) {
|
||||||
@@ -312,64 +267,34 @@ func (c *client) ProcessSubscribe(packet *packets.SubscribePacket) {
|
|||||||
t := topic
|
t := topic
|
||||||
//check topic auth for client
|
//check topic auth for client
|
||||||
if !c.CheckTopicAuth(SUB, topic) {
|
if !c.CheckTopicAuth(SUB, topic) {
|
||||||
log.Error("Sub topic Auth failed: ", topic)
|
log.Error("Sub topic Auth failed: ", zap.String("topic", topic), zap.String("ClientID", c.info.clientID))
|
||||||
retcodes = append(retcodes, QosFailure)
|
retcodes = append(retcodes, QosFailure)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
queue := strings.HasPrefix(topic, "$queue/")
|
|
||||||
if queue {
|
|
||||||
if len(t) > 7 {
|
|
||||||
t = t[7:]
|
|
||||||
if _, exists := b.queues[topic]; !exists {
|
|
||||||
b.queues[topic] = 0
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
retcodes = append(retcodes, QosFailure)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sub := &subscription{
|
sub := &subscription{
|
||||||
topic: t,
|
topic: t,
|
||||||
qos: qoss[i],
|
qos: qoss[i],
|
||||||
client: c,
|
client: c,
|
||||||
queue: queue,
|
|
||||||
}
|
}
|
||||||
switch c.typ {
|
|
||||||
case CLIENT:
|
|
||||||
if _, exist := c.subs[topic]; !exist {
|
|
||||||
c.subs[topic] = sub
|
|
||||||
|
|
||||||
} else {
|
rqos, err := c.topicsMgr.Subscribe([]byte(topic), qoss[i], sub)
|
||||||
//if exist ,check whether qos change
|
|
||||||
c.subs[topic].qos = qoss[i]
|
|
||||||
retcodes = append(retcodes, qoss[i])
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
case ROUTER:
|
|
||||||
if subinfo, exist := c.rsubs[topic]; !exist {
|
|
||||||
sinfo := &subInfo{sub: sub, num: 1}
|
|
||||||
c.rsubs[topic] = sinfo
|
|
||||||
|
|
||||||
} else {
|
|
||||||
subinfo.num = subinfo.num + 1
|
|
||||||
retcodes = append(retcodes, qoss[i])
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
err := b.sl.Insert(sub)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Insert subscription error: ", err)
|
return
|
||||||
retcodes = append(retcodes, QosFailure)
|
|
||||||
} else {
|
|
||||||
retcodes = append(retcodes, qoss[i])
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
c.subMap[topic] = sub
|
||||||
|
c.session.AddTopic(topic, qoss[i])
|
||||||
|
retcodes = append(retcodes, rqos)
|
||||||
|
c.topicsMgr.Retained([]byte(topic), &c.rmsgs)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
suback.ReturnCodes = retcodes
|
suback.ReturnCodes = retcodes
|
||||||
|
|
||||||
err := c.WriterPacket(suback)
|
err := c.WriterPacket(suback)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("send suback error, ", err)
|
log.Error("send suback error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
//broadcast subscribe message
|
//broadcast subscribe message
|
||||||
@@ -378,13 +303,11 @@ func (c *client) ProcessSubscribe(packet *packets.SubscribePacket) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
//process retain message
|
//process retain message
|
||||||
for _, t := range topics {
|
for _, rm := range c.rmsgs {
|
||||||
packets := b.rl.Match(t)
|
if err := c.WriterPacket(rm); err != nil {
|
||||||
for _, packet := range packets {
|
log.Error("Error publishing retained message:", zap.Any("err", err), zap.String("ClientID", c.info.clientID))
|
||||||
log.Info("process retain message: ", packet)
|
} else {
|
||||||
if packet != nil {
|
log.Info("process retain message: ", zap.Any("packet", packet), zap.String("ClientID", c.info.clientID))
|
||||||
c.WriterPacket(packet)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -397,34 +320,16 @@ func (c *client) ProcessUnSubscribe(packet *packets.UnsubscribePacket) {
|
|||||||
if b == nil {
|
if b == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
typ := c.typ
|
|
||||||
topics := packet.Topics
|
topics := packet.Topics
|
||||||
|
|
||||||
for _, t := range topics {
|
for _, topic := range topics {
|
||||||
var sub *subscription
|
t := []byte(topic)
|
||||||
ok := false
|
sub, exist := c.subMap[topic]
|
||||||
switch typ {
|
if exist {
|
||||||
case CLIENT:
|
c.topicsMgr.Unsubscribe(t, sub)
|
||||||
sub, ok = c.subs[t]
|
c.session.RemoveTopic(topic)
|
||||||
case ROUTER:
|
delete(c.subMap, topic)
|
||||||
subinfo, ok := c.rsubs[t]
|
|
||||||
if ok {
|
|
||||||
subinfo.num = subinfo.num - 1
|
|
||||||
if subinfo.num < 1 {
|
|
||||||
sub = subinfo.sub
|
|
||||||
delete(c.rsubs, t)
|
|
||||||
} else {
|
|
||||||
c.rsubs[t] = subinfo
|
|
||||||
sub = nil
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if ok {
|
|
||||||
go c.unsubscribe(sub)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
unsuback := packets.NewControlPacket(packets.Unsuback).(*packets.UnsubackPacket)
|
unsuback := packets.NewControlPacket(packets.Unsuback).(*packets.UnsubackPacket)
|
||||||
@@ -432,7 +337,7 @@ func (c *client) ProcessUnSubscribe(packet *packets.UnsubscribePacket) {
|
|||||||
|
|
||||||
err := c.WriterPacket(unsuback)
|
err := c.WriterPacket(unsuback)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("send unsuback error, ", err)
|
log.Error("send unsuback error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// //process ubsubscribe message
|
// //process ubsubscribe message
|
||||||
@@ -441,19 +346,6 @@ func (c *client) ProcessUnSubscribe(packet *packets.UnsubscribePacket) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *client) unsubscribe(sub *subscription) {
|
|
||||||
|
|
||||||
if c.typ == CLIENT {
|
|
||||||
delete(c.subs, sub.topic)
|
|
||||||
|
|
||||||
}
|
|
||||||
b := c.broker
|
|
||||||
if b != nil && sub != nil {
|
|
||||||
b.sl.Remove(sub)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *client) ProcessPing() {
|
func (c *client) ProcessPing() {
|
||||||
if c.status == Disconnected {
|
if c.status == Disconnected {
|
||||||
return
|
return
|
||||||
@@ -461,39 +353,67 @@ func (c *client) ProcessPing() {
|
|||||||
resp := packets.NewControlPacket(packets.Pingresp).(*packets.PingrespPacket)
|
resp := packets.NewControlPacket(packets.Pingresp).(*packets.PingrespPacket)
|
||||||
err := c.WriterPacket(resp)
|
err := c.WriterPacket(resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("send PingResponse error, ", err)
|
log.Error("send PingResponse error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *client) Close() {
|
func (c *client) Close() {
|
||||||
c.smu.Lock()
|
if c.status == Disconnected {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.cancelFunc()
|
||||||
|
|
||||||
c.status = Disconnected
|
c.status = Disconnected
|
||||||
|
//wait for message complete
|
||||||
|
time.Sleep(1 * time.Second)
|
||||||
|
// c.status = Disconnected
|
||||||
|
|
||||||
if c.conn != nil {
|
if c.conn != nil {
|
||||||
c.conn.Close()
|
c.conn.Close()
|
||||||
c.conn = nil
|
c.conn = nil
|
||||||
}
|
}
|
||||||
c.smu.Unlock()
|
|
||||||
b := c.broker
|
b := c.broker
|
||||||
subs := c.subs
|
subs := c.subMap
|
||||||
if b != nil {
|
if b != nil {
|
||||||
b.removeClient(c)
|
b.removeClient(c)
|
||||||
for _, sub := range subs {
|
|
||||||
err := b.sl.Remove(sub)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("closed client but remove sublist error, ", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if c.typ == CLIENT {
|
if c.typ == CLIENT {
|
||||||
b.BroadcastUnSubscribe(subs)
|
b.BroadcastUnSubscribe(subs)
|
||||||
|
//offline notification
|
||||||
|
b.OnlineOfflineNotification(c.info.clientID, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
if c.info.willMsg != nil {
|
if c.info.willMsg != nil {
|
||||||
b.PublishMessage(c.info.willMsg)
|
b.PublishMessage(c.info.willMsg)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if c.typ == CLUSTER {
|
||||||
|
b.ConnectToDiscovery()
|
||||||
|
}
|
||||||
|
|
||||||
|
//do reconnect
|
||||||
|
if c.typ == REMOTE {
|
||||||
|
go b.connectRouter(c.route.remoteID, c.route.remoteUrl)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *client) WriterPacket(packet packets.ControlPacket) error {
|
func (c *client) WriterPacket(packet packets.ControlPacket) error {
|
||||||
|
if c.status == Disconnected {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if packet == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if c.conn == nil {
|
||||||
|
c.Close()
|
||||||
|
return errors.New("connect lost ....")
|
||||||
|
}
|
||||||
|
|
||||||
c.mu.Lock()
|
c.mu.Lock()
|
||||||
err := packet.Write(c.conn)
|
err := packet.Write(c.conn)
|
||||||
c.mu.Unlock()
|
c.mu.Unlock()
|
||||||
|
|||||||
@@ -1,3 +1,5 @@
|
|||||||
|
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
|
||||||
|
*/
|
||||||
package broker
|
package broker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -5,10 +7,8 @@ import (
|
|||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"errors"
|
|
||||||
"io"
|
"io"
|
||||||
"reflect"
|
"reflect"
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -46,47 +46,6 @@ const (
|
|||||||
QosFailure = 0x80
|
QosFailure = 0x80
|
||||||
)
|
)
|
||||||
|
|
||||||
func SubscribeTopicCheckAndSpilt(topic string) ([]string, error) {
|
|
||||||
if strings.Index(topic, "#") != -1 && strings.Index(topic, "#") != len(topic)-1 {
|
|
||||||
return nil, errors.New("Topic format error with index of #")
|
|
||||||
}
|
|
||||||
re := strings.Split(topic, "/")
|
|
||||||
for i, v := range re {
|
|
||||||
if i != 0 && i != (len(re)-1) {
|
|
||||||
if v == "" {
|
|
||||||
return nil, errors.New("Topic format error with index of //")
|
|
||||||
}
|
|
||||||
if strings.Contains(v, "+") && v != "+" {
|
|
||||||
return nil, errors.New("Topic format error with index of +")
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if v == "" {
|
|
||||||
re[i] = "/"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return re, nil
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func PublishTopicCheckAndSpilt(topic string) ([]string, error) {
|
|
||||||
if strings.Index(topic, "#") != -1 || strings.Index(topic, "+") != -1 {
|
|
||||||
return nil, errors.New("Publish Topic format error with + and #")
|
|
||||||
}
|
|
||||||
re := strings.Split(topic, "/")
|
|
||||||
for i, v := range re {
|
|
||||||
if v == "" {
|
|
||||||
if i != 0 && i != (len(re)-1) {
|
|
||||||
return nil, errors.New("Topic format error with index of //")
|
|
||||||
} else {
|
|
||||||
re[i] = "/"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
return re, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func equal(k1, k2 interface{}) bool {
|
func equal(k1, k2 interface{}) bool {
|
||||||
if reflect.TypeOf(k1) != reflect.TypeOf(k2) {
|
if reflect.TypeOf(k1) != reflect.TypeOf(k2) {
|
||||||
return false
|
return false
|
||||||
|
|||||||
131
broker/config.go
131
broker/config.go
@@ -1,17 +1,19 @@
|
|||||||
|
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
|
||||||
|
*/
|
||||||
package broker
|
package broker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"crypto/x509"
|
"crypto/x509"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
|
||||||
log "github.com/cihub/seelog"
|
"github.com/fhmq/hmq/logger"
|
||||||
)
|
"go.uber.org/zap"
|
||||||
|
|
||||||
const (
|
|
||||||
CONFIGFILE = "hmq.config"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type Config struct {
|
type Config struct {
|
||||||
@@ -19,6 +21,7 @@ type Config struct {
|
|||||||
Host string `json:"host"`
|
Host string `json:"host"`
|
||||||
Port string `json:"port"`
|
Port string `json:"port"`
|
||||||
Cluster RouteInfo `json:"cluster"`
|
Cluster RouteInfo `json:"cluster"`
|
||||||
|
Router string `json:"router"`
|
||||||
TlsHost string `json:"tlsHost"`
|
TlsHost string `json:"tlsHost"`
|
||||||
TlsPort string `json:"tlsPort"`
|
TlsPort string `json:"tlsPort"`
|
||||||
WsPath string `json:"wsPath"`
|
WsPath string `json:"wsPath"`
|
||||||
@@ -27,12 +30,12 @@ type Config struct {
|
|||||||
TlsInfo TLSInfo `json:"tlsInfo"`
|
TlsInfo TLSInfo `json:"tlsInfo"`
|
||||||
Acl bool `json:"acl"`
|
Acl bool `json:"acl"`
|
||||||
AclConf string `json:"aclConf"`
|
AclConf string `json:"aclConf"`
|
||||||
|
Debug bool `json:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type RouteInfo struct {
|
type RouteInfo struct {
|
||||||
Host string `json:"host"`
|
Host string `json:"host"`
|
||||||
Port string `json:"port"`
|
Port string `json:"port"`
|
||||||
Routes []string `json:"routes"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type TLSInfo struct {
|
type TLSInfo struct {
|
||||||
@@ -42,11 +45,94 @@ type TLSInfo struct {
|
|||||||
KeyFile string `json:"keyFile"`
|
KeyFile string `json:"keyFile"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func LoadConfig() (*Config, error) {
|
var DefaultConfig *Config = &Config{
|
||||||
|
Worker: 4096,
|
||||||
|
Host: "0.0.0.0",
|
||||||
|
Port: "1883",
|
||||||
|
Acl: false,
|
||||||
|
}
|
||||||
|
|
||||||
content, err := ioutil.ReadFile(CONFIGFILE)
|
var (
|
||||||
|
log *zap.Logger
|
||||||
|
)
|
||||||
|
|
||||||
|
func showHelp() {
|
||||||
|
fmt.Printf("%s\n", usageStr)
|
||||||
|
os.Exit(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func ConfigureConfig(args []string) (*Config, error) {
|
||||||
|
config := &Config{}
|
||||||
|
var (
|
||||||
|
help bool
|
||||||
|
configFile string
|
||||||
|
)
|
||||||
|
fs := flag.NewFlagSet("hmq-broker", flag.ExitOnError)
|
||||||
|
fs.Usage = showHelp
|
||||||
|
|
||||||
|
fs.BoolVar(&help, "h", false, "Show this message.")
|
||||||
|
fs.BoolVar(&help, "help", false, "Show this message.")
|
||||||
|
fs.IntVar(&config.Worker, "w", 1024, "worker num to process message, perfer (client num)/10.")
|
||||||
|
fs.IntVar(&config.Worker, "worker", 1024, "worker num to process message, perfer (client num)/10.")
|
||||||
|
fs.StringVar(&config.Port, "port", "1883", "Port to listen on.")
|
||||||
|
fs.StringVar(&config.Port, "p", "1883", "Port to listen on.")
|
||||||
|
fs.StringVar(&config.Host, "host", "0.0.0.0", "Network host to listen on")
|
||||||
|
fs.StringVar(&config.Cluster.Port, "cp", "", "Cluster port from which members can connect.")
|
||||||
|
fs.StringVar(&config.Cluster.Port, "clusterport", "", "Cluster port from which members can connect.")
|
||||||
|
fs.StringVar(&config.Router, "r", "", "Router who maintenance cluster info")
|
||||||
|
fs.StringVar(&config.Router, "router", "", "Router who maintenance cluster info")
|
||||||
|
fs.StringVar(&config.WsPort, "ws", "", "port for ws to listen on")
|
||||||
|
fs.StringVar(&config.WsPort, "wsport", "", "port for ws to listen on")
|
||||||
|
fs.StringVar(&config.WsPath, "wsp", "", "path for ws to listen on")
|
||||||
|
fs.StringVar(&config.WsPath, "wspath", "", "path for ws to listen on")
|
||||||
|
fs.StringVar(&configFile, "config", "", "config file for hmq")
|
||||||
|
fs.StringVar(&configFile, "c", "", "config file for hmq")
|
||||||
|
fs.BoolVar(&config.Debug, "debug", false, "enable Debug logging.")
|
||||||
|
fs.BoolVar(&config.Debug, "d", false, "enable Debug logging.")
|
||||||
|
|
||||||
|
fs.Bool("D", true, "enable Debug logging.")
|
||||||
|
|
||||||
|
if err := fs.Parse(args); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if help {
|
||||||
|
showHelp()
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
fs.Visit(func(f *flag.Flag) {
|
||||||
|
switch f.Name {
|
||||||
|
case "D":
|
||||||
|
config.Debug = true
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
logger.InitLogger(config.Debug)
|
||||||
|
log = logger.Get().Named("Broker")
|
||||||
|
|
||||||
|
if configFile != "" {
|
||||||
|
tmpConfig, e := LoadConfig(configFile)
|
||||||
|
if e != nil {
|
||||||
|
return nil, e
|
||||||
|
} else {
|
||||||
|
config = tmpConfig
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := config.check(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return config, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func LoadConfig(filename string) (*Config, error) {
|
||||||
|
|
||||||
|
content, err := ioutil.ReadFile(filename)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Read config file error: ", err)
|
log.Error("Read config file error: ", zap.Error(err))
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// log.Info(string(content))
|
// log.Info(string(content))
|
||||||
@@ -54,16 +140,19 @@ func LoadConfig() (*Config, error) {
|
|||||||
var config Config
|
var config Config
|
||||||
err = json.Unmarshal(content, &config)
|
err = json.Unmarshal(content, &config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Unmarshal config file error: ", err)
|
log.Error("Unmarshal config file error: ", zap.Error(err))
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return &config, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (config *Config) check() error {
|
||||||
|
|
||||||
if config.Worker == 0 {
|
if config.Worker == 0 {
|
||||||
config.Worker = 1024
|
config.Worker = 1024
|
||||||
}
|
}
|
||||||
|
|
||||||
WorkNum = config.Worker
|
|
||||||
|
|
||||||
if config.Port != "" {
|
if config.Port != "" {
|
||||||
if config.Host == "" {
|
if config.Host == "" {
|
||||||
config.Host = "0.0.0.0"
|
config.Host = "0.0.0.0"
|
||||||
@@ -75,29 +164,33 @@ func LoadConfig() (*Config, error) {
|
|||||||
config.Cluster.Host = "0.0.0.0"
|
config.Cluster.Host = "0.0.0.0"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if config.Router != "" {
|
||||||
|
if config.Cluster.Port == "" {
|
||||||
|
return errors.New("cluster port is null")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if config.TlsPort != "" {
|
if config.TlsPort != "" {
|
||||||
if config.TlsInfo.CertFile == "" || config.TlsInfo.KeyFile == "" {
|
if config.TlsInfo.CertFile == "" || config.TlsInfo.KeyFile == "" {
|
||||||
log.Error("tls config error, no cert or key file.")
|
log.Error("tls config error, no cert or key file.")
|
||||||
return nil, err
|
return errors.New("tls config error, no cert or key file.")
|
||||||
}
|
}
|
||||||
if config.TlsHost == "" {
|
if config.TlsHost == "" {
|
||||||
config.TlsHost = "0.0.0.0"
|
config.TlsHost = "0.0.0.0"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
return &config, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewTLSConfig(tlsInfo TLSInfo) (*tls.Config, error) {
|
func NewTLSConfig(tlsInfo TLSInfo) (*tls.Config, error) {
|
||||||
|
|
||||||
cert, err := tls.LoadX509KeyPair(tlsInfo.CertFile, tlsInfo.KeyFile)
|
cert, err := tls.LoadX509KeyPair(tlsInfo.CertFile, tlsInfo.KeyFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error parsing X509 certificate/key pair: %v", err)
|
return nil, fmt.Errorf("error parsing X509 certificate/key pair: %v", zap.Error(err))
|
||||||
}
|
}
|
||||||
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
|
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error parsing certificate: %v", err)
|
return nil, fmt.Errorf("error parsing certificate: %v", zap.Error(err))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create TLSConfig
|
// Create TLSConfig
|
||||||
|
|||||||
@@ -1,48 +0,0 @@
|
|||||||
package broker
|
|
||||||
|
|
||||||
// const (
|
|
||||||
// WorkNum = 4096
|
|
||||||
// )
|
|
||||||
|
|
||||||
var WorkNum int
|
|
||||||
|
|
||||||
type Dispatcher struct {
|
|
||||||
WorkerPool chan chan *Message
|
|
||||||
}
|
|
||||||
|
|
||||||
func StartDispatcher() {
|
|
||||||
InitMessagePool()
|
|
||||||
dispatcher := NewDispatcher()
|
|
||||||
dispatcher.Run()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Dispatcher) Run() {
|
|
||||||
// starting n number of workers
|
|
||||||
for i := 0; i < WorkNum; i++ {
|
|
||||||
worker := NewWorker(d.WorkerPool)
|
|
||||||
worker.Start()
|
|
||||||
}
|
|
||||||
go d.dispatch()
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewDispatcher() *Dispatcher {
|
|
||||||
pool := make(chan chan *Message, WorkNum)
|
|
||||||
return &Dispatcher{WorkerPool: pool}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Dispatcher) dispatch() {
|
|
||||||
for i := 0; i < MessagePoolNum; i++ {
|
|
||||||
go func(idx int) {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case msg := <-MSGPool[idx].queue:
|
|
||||||
go func(msg *Message) {
|
|
||||||
msgChannel := <-d.WorkerPool
|
|
||||||
msgChannel <- msg
|
|
||||||
}(msg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}(i)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
@@ -1,13 +1,14 @@
|
|||||||
|
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
|
||||||
|
*/
|
||||||
package broker
|
package broker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
|
||||||
|
|
||||||
simplejson "github.com/bitly/go-simplejson"
|
simplejson "github.com/bitly/go-simplejson"
|
||||||
log "github.com/cihub/seelog"
|
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||||
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (c *client) SendInfo() {
|
func (c *client) SendInfo() {
|
||||||
@@ -19,28 +20,31 @@ func (c *client) SendInfo() {
|
|||||||
infoMsg := NewInfo(c.broker.id, url, false)
|
infoMsg := NewInfo(c.broker.id, url, false)
|
||||||
err := c.WriterPacket(infoMsg)
|
err := c.WriterPacket(infoMsg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("send info message error, ", err)
|
log.Error("send info message error, ", zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// log.Info("send info success")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *client) StartPing() {
|
func (c *client) StartPing() {
|
||||||
timeTicker := time.NewTicker(time.Second * 30)
|
timeTicker := time.NewTicker(time.Second * 50)
|
||||||
ping := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket)
|
ping := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket)
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-timeTicker.C:
|
case <-timeTicker.C:
|
||||||
err := c.WriterPacket(ping)
|
err := c.WriterPacket(ping)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("ping error: ", err)
|
log.Error("ping error: ", zap.Error(err))
|
||||||
|
c.Close()
|
||||||
}
|
}
|
||||||
|
case <-c.ctx.Done():
|
||||||
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *client) SendConnect() {
|
func (c *client) SendConnect() {
|
||||||
if c.status == Disconnected {
|
|
||||||
|
if c.status != Connected {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
|
m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
|
||||||
@@ -50,10 +54,10 @@ func (c *client) SendConnect() {
|
|||||||
m.Keepalive = uint16(60)
|
m.Keepalive = uint16(60)
|
||||||
err := c.WriterPacket(m)
|
err := c.WriterPacket(m)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("send connect message error, ", err)
|
log.Error("send connect message error, ", zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// log.Info("send connet success")
|
log.Info("send connect success")
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewInfo(sid, url string, isforword bool) *packets.PublishPacket {
|
func NewInfo(sid, url string, isforword bool) *packets.PublishPacket {
|
||||||
@@ -61,7 +65,7 @@ func NewInfo(sid, url string, isforword bool) *packets.PublishPacket {
|
|||||||
pub.Qos = 0
|
pub.Qos = 0
|
||||||
pub.TopicName = BrokerInfoTopic
|
pub.TopicName = BrokerInfoTopic
|
||||||
pub.Retain = false
|
pub.Retain = false
|
||||||
info := fmt.Sprintf(`{"remoteID":"%s","url":"%s","isForward":%t}`, sid, url, isforword)
|
info := fmt.Sprintf(`{"brokerID":"%s","brokerUrl":"%s"}`, sid, url)
|
||||||
// log.Info("new info", string(info))
|
// log.Info("new info", string(info))
|
||||||
pub.Payload = []byte(info)
|
pub.Payload = []byte(info)
|
||||||
return pub
|
return pub
|
||||||
@@ -74,47 +78,36 @@ func (c *client) ProcessInfo(packet *packets.PublishPacket) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Info("recv remoteInfo: ", string(packet.Payload))
|
log.Info("recv remoteInfo: ", zap.String("payload", string(packet.Payload)))
|
||||||
|
|
||||||
js, e := simplejson.NewJson(packet.Payload)
|
js, err := simplejson.NewJson(packet.Payload)
|
||||||
if e != nil {
|
if err != nil {
|
||||||
log.Warn("parse info message err", e)
|
log.Warn("parse info message err", zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
rid := js.Get("remoteID").MustString()
|
routes, err := js.Get("data").Map()
|
||||||
rurl := js.Get("url").MustString()
|
if routes == nil {
|
||||||
isForward := js.Get("isForward").MustBool()
|
log.Error("receive info message error, ", zap.Error(err))
|
||||||
|
|
||||||
if rid == "" {
|
|
||||||
log.Error("receive info message error with remoteID is null")
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if rid == b.id {
|
b.nodes = routes
|
||||||
if !isForward {
|
|
||||||
c.Close() //close connet self
|
b.mu.Lock()
|
||||||
|
for rid, rurl := range routes {
|
||||||
|
if rid == b.id {
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
exist := b.CheckRemoteExist(rid, rurl)
|
url, ok := rurl.(string)
|
||||||
if !exist {
|
if ok {
|
||||||
go b.connectRouter(rurl, rid)
|
exist := b.CheckRemoteExist(rid, url)
|
||||||
}
|
if !exist {
|
||||||
// log.Info("isforword: ", isForward)
|
b.connectRouter(rid, url)
|
||||||
if !isForward {
|
}
|
||||||
route := &route{
|
|
||||||
remoteUrl: rurl,
|
|
||||||
remoteID: rid,
|
|
||||||
}
|
}
|
||||||
c.route = route
|
|
||||||
|
|
||||||
go b.SendLocalSubsToRouter(c)
|
|
||||||
// log.Info("BroadcastInfoMessage starting... ")
|
|
||||||
infoMsg := NewInfo(rid, rurl, true)
|
|
||||||
b.BroadcastInfoMessage(rid, infoMsg)
|
|
||||||
}
|
}
|
||||||
|
b.mu.Unlock()
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,62 +0,0 @@
|
|||||||
package broker
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
MaxUser = 1024 * 1024
|
|
||||||
MessagePoolNum = 1024
|
|
||||||
MessagePoolUser = MaxUser / MessagePoolNum
|
|
||||||
MessagePoolMessageNum = MaxUser / MessagePoolNum * 4
|
|
||||||
)
|
|
||||||
|
|
||||||
type Message struct {
|
|
||||||
client *client
|
|
||||||
packet packets.ControlPacket
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
MSGPool []MessagePool
|
|
||||||
)
|
|
||||||
|
|
||||||
type MessagePool struct {
|
|
||||||
l sync.Mutex
|
|
||||||
maxuser int
|
|
||||||
user int
|
|
||||||
queue chan *Message
|
|
||||||
}
|
|
||||||
|
|
||||||
func InitMessagePool() {
|
|
||||||
MSGPool = make([]MessagePool, (MessagePoolNum + 2))
|
|
||||||
for i := 0; i < (MessagePoolNum + 2); i++ {
|
|
||||||
MSGPool[i].Init(MessagePoolUser, MessagePoolMessageNum)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *MessagePool) Init(num int, maxusernum int) {
|
|
||||||
p.maxuser = maxusernum
|
|
||||||
p.queue = make(chan *Message, num)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *MessagePool) GetPool() *MessagePool {
|
|
||||||
p.l.Lock()
|
|
||||||
if p.user+1 < p.maxuser {
|
|
||||||
p.user += 1
|
|
||||||
p.l.Unlock()
|
|
||||||
return p
|
|
||||||
} else {
|
|
||||||
p.l.Unlock()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *MessagePool) Reduce() {
|
|
||||||
p.l.Lock()
|
|
||||||
p.user -= 1
|
|
||||||
p.l.Unlock()
|
|
||||||
|
|
||||||
}
|
|
||||||
122
broker/retain.go
122
broker/retain.go
@@ -1,122 +0,0 @@
|
|||||||
package broker
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
|
||||||
)
|
|
||||||
|
|
||||||
type RetainList struct {
|
|
||||||
sync.RWMutex
|
|
||||||
root *rlevel
|
|
||||||
}
|
|
||||||
type rlevel struct {
|
|
||||||
nodes map[string]*rnode
|
|
||||||
}
|
|
||||||
type rnode struct {
|
|
||||||
next *rlevel
|
|
||||||
msg *packets.PublishPacket
|
|
||||||
}
|
|
||||||
type RetainResult struct {
|
|
||||||
msg []*packets.PublishPacket
|
|
||||||
}
|
|
||||||
|
|
||||||
func newRNode() *rnode {
|
|
||||||
return &rnode{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newRLevel() *rlevel {
|
|
||||||
return &rlevel{nodes: make(map[string]*rnode)}
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewRetainList() *RetainList {
|
|
||||||
return &RetainList{root: newRLevel()}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *RetainList) Insert(topic string, buf *packets.PublishPacket) error {
|
|
||||||
|
|
||||||
tokens, err := PublishTopicCheckAndSpilt(topic)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// log.Info("insert tokens:", tokens)
|
|
||||||
r.Lock()
|
|
||||||
|
|
||||||
l := r.root
|
|
||||||
var n *rnode
|
|
||||||
for _, t := range tokens {
|
|
||||||
n = l.nodes[t]
|
|
||||||
if n == nil {
|
|
||||||
n = newRNode()
|
|
||||||
l.nodes[t] = n
|
|
||||||
}
|
|
||||||
if n.next == nil {
|
|
||||||
n.next = newRLevel()
|
|
||||||
}
|
|
||||||
l = n.next
|
|
||||||
}
|
|
||||||
n.msg = buf
|
|
||||||
r.Unlock()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *RetainList) Match(topic string) []*packets.PublishPacket {
|
|
||||||
|
|
||||||
tokens, err := SubscribeTopicCheckAndSpilt(topic)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
results := &RetainResult{}
|
|
||||||
|
|
||||||
r.Lock()
|
|
||||||
l := r.root
|
|
||||||
matchRLevel(l, tokens, results)
|
|
||||||
r.Unlock()
|
|
||||||
// log.Info("results: ", results)
|
|
||||||
return results.msg
|
|
||||||
|
|
||||||
}
|
|
||||||
func matchRLevel(l *rlevel, toks []string, results *RetainResult) {
|
|
||||||
var n *rnode
|
|
||||||
for i, t := range toks {
|
|
||||||
if l == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// log.Info("l info :", l.nodes)
|
|
||||||
if t == "#" {
|
|
||||||
for _, n := range l.nodes {
|
|
||||||
n.GetAll(results)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if t == "+" {
|
|
||||||
for _, n := range l.nodes {
|
|
||||||
if len(t[i+1:]) == 0 {
|
|
||||||
results.msg = append(results.msg, n.msg)
|
|
||||||
} else {
|
|
||||||
matchRLevel(n.next, toks[i+1:], results)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
n = l.nodes[t]
|
|
||||||
if n != nil {
|
|
||||||
l = n.next
|
|
||||||
} else {
|
|
||||||
l = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if n != nil {
|
|
||||||
results.msg = append(results.msg, n.msg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *rnode) GetAll(results *RetainResult) {
|
|
||||||
// log.Info("node 's message: ", string(r.msg))
|
|
||||||
if r.msg != nil {
|
|
||||||
results.msg = append(results.msg, r.msg)
|
|
||||||
}
|
|
||||||
l := r.next
|
|
||||||
for _, n := range l.nodes {
|
|
||||||
n.GetAll(results)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
53
broker/sesson.go
Normal file
53
broker/sesson.go
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
package broker
|
||||||
|
|
||||||
|
import "github.com/eclipse/paho.mqtt.golang/packets"
|
||||||
|
|
||||||
|
func (b *Broker) getSession(cli *client, req *packets.ConnectPacket, resp *packets.ConnackPacket) error {
|
||||||
|
// If CleanSession is set to 0, the server MUST resume communications with the
|
||||||
|
// client based on state from the current session, as identified by the client
|
||||||
|
// identifier. If there is no session associated with the client identifier the
|
||||||
|
// server must create a new session.
|
||||||
|
//
|
||||||
|
// If CleanSession is set to 1, the client and server must discard any previous
|
||||||
|
// session and start a new one. b session lasts as long as the network c
|
||||||
|
// onnection. State data associated with b session must not be reused in any
|
||||||
|
// subsequent session.
|
||||||
|
|
||||||
|
var err error
|
||||||
|
|
||||||
|
// Check to see if the client supplied an ID, if not, generate one and set
|
||||||
|
// clean session.
|
||||||
|
|
||||||
|
if len(req.ClientIdentifier) == 0 {
|
||||||
|
req.CleanSession = true
|
||||||
|
}
|
||||||
|
|
||||||
|
cid := req.ClientIdentifier
|
||||||
|
|
||||||
|
// If CleanSession is NOT set, check the session store for existing session.
|
||||||
|
// If found, return it.
|
||||||
|
if !req.CleanSession {
|
||||||
|
if cli.session, err = b.sessionMgr.Get(cid); err == nil {
|
||||||
|
resp.SessionPresent = true
|
||||||
|
|
||||||
|
if err := cli.session.Update(req); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If CleanSession, or no existing session found, then create a new one
|
||||||
|
if cli.session == nil {
|
||||||
|
if cli.session, err = b.sessionMgr.New(cid); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
resp.SessionPresent = false
|
||||||
|
|
||||||
|
if err := cli.session.Init(req); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
@@ -1,318 +0,0 @@
|
|||||||
package broker
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
log "github.com/cihub/seelog"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A result structure better optimized for queue subs.
|
|
||||||
type SublistResult struct {
|
|
||||||
psubs []*subscription
|
|
||||||
qsubs []*subscription // don't make this a map, too expensive to iterate
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Sublist stores and efficiently retrieves subscriptions.
|
|
||||||
type Sublist struct {
|
|
||||||
sync.RWMutex
|
|
||||||
cache map[string]*SublistResult
|
|
||||||
root *level
|
|
||||||
}
|
|
||||||
|
|
||||||
// A node contains subscriptions and a pointer to the next level.
|
|
||||||
type node struct {
|
|
||||||
next *level
|
|
||||||
psubs []*subscription
|
|
||||||
qsubs []*subscription
|
|
||||||
}
|
|
||||||
|
|
||||||
// A level represents a group of nodes and special pointers to
|
|
||||||
// wildcard nodes.
|
|
||||||
type level struct {
|
|
||||||
nodes map[string]*node
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a new default node.
|
|
||||||
func newNode() *node {
|
|
||||||
return &node{psubs: make([]*subscription, 0, 4), qsubs: make([]*subscription, 0, 4)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a new default level. We use FNV1A as the hash
|
|
||||||
// algortihm for the tokens, which should be short.
|
|
||||||
func newLevel() *level {
|
|
||||||
return &level{nodes: make(map[string]*node)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// New will create a default sublist
|
|
||||||
func NewSublist() *Sublist {
|
|
||||||
return &Sublist{root: newLevel(), cache: make(map[string]*SublistResult)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Insert adds a subscription into the sublist
|
|
||||||
func (s *Sublist) Insert(sub *subscription) error {
|
|
||||||
|
|
||||||
tokens, err := SubscribeTopicCheckAndSpilt(sub.topic)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
s.Lock()
|
|
||||||
|
|
||||||
l := s.root
|
|
||||||
var n *node
|
|
||||||
for _, t := range tokens {
|
|
||||||
n = l.nodes[t]
|
|
||||||
if n == nil {
|
|
||||||
n = newNode()
|
|
||||||
l.nodes[t] = n
|
|
||||||
}
|
|
||||||
if n.next == nil {
|
|
||||||
n.next = newLevel()
|
|
||||||
}
|
|
||||||
l = n.next
|
|
||||||
}
|
|
||||||
if sub.queue {
|
|
||||||
//check qsub is already exist
|
|
||||||
for i := range n.qsubs {
|
|
||||||
if equal(n.qsubs[i], sub) {
|
|
||||||
n.qsubs[i] = sub
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
n.qsubs = append(n.qsubs, sub)
|
|
||||||
} else {
|
|
||||||
//check psub is already exist
|
|
||||||
for i := range n.psubs {
|
|
||||||
if equal(n.psubs[i], sub) {
|
|
||||||
n.psubs[i] = sub
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
n.psubs = append(n.psubs, sub)
|
|
||||||
}
|
|
||||||
|
|
||||||
topic := string(sub.topic)
|
|
||||||
s.addToCache(topic, sub)
|
|
||||||
s.Unlock()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Sublist) addToCache(topic string, sub *subscription) {
|
|
||||||
for k, r := range s.cache {
|
|
||||||
if matchLiteral(k, topic) {
|
|
||||||
// Copy since others may have a reference.
|
|
||||||
nr := copyResult(r)
|
|
||||||
if sub.queue == false {
|
|
||||||
nr.psubs = append(nr.psubs, sub)
|
|
||||||
} else {
|
|
||||||
nr.qsubs = append(nr.qsubs, sub)
|
|
||||||
}
|
|
||||||
s.cache[k] = nr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Sublist) removeFromCache(topic string, sub *subscription) {
|
|
||||||
for k := range s.cache {
|
|
||||||
if !matchLiteral(k, topic) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Since someone else may be referecing, can't modify the list
|
|
||||||
// safely, just let it re-populate.
|
|
||||||
delete(s.cache, k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func matchLiteral(literal, topic string) bool {
|
|
||||||
tok, _ := SubscribeTopicCheckAndSpilt(topic)
|
|
||||||
li, _ := PublishTopicCheckAndSpilt(literal)
|
|
||||||
|
|
||||||
for i := 0; i < len(tok); i++ {
|
|
||||||
b := tok[i]
|
|
||||||
switch b {
|
|
||||||
case "+":
|
|
||||||
|
|
||||||
case "#":
|
|
||||||
return true
|
|
||||||
default:
|
|
||||||
if b != li[i] {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deep copy
|
|
||||||
func copyResult(r *SublistResult) *SublistResult {
|
|
||||||
nr := &SublistResult{}
|
|
||||||
nr.psubs = append([]*subscription(nil), r.psubs...)
|
|
||||||
nr.qsubs = append([]*subscription(nil), r.qsubs...)
|
|
||||||
return nr
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Sublist) Remove(sub *subscription) error {
|
|
||||||
tokens, err := SubscribeTopicCheckAndSpilt(sub.topic)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
s.Lock()
|
|
||||||
defer s.Unlock()
|
|
||||||
|
|
||||||
l := s.root
|
|
||||||
var n *node
|
|
||||||
|
|
||||||
for _, t := range tokens {
|
|
||||||
if l == nil {
|
|
||||||
return errors.New("No Matches subscription Found")
|
|
||||||
}
|
|
||||||
n = l.nodes[t]
|
|
||||||
if n != nil {
|
|
||||||
l = n.next
|
|
||||||
} else {
|
|
||||||
l = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !s.removeFromNode(n, sub) {
|
|
||||||
return errors.New("No Matches subscription Found")
|
|
||||||
}
|
|
||||||
topic := string(sub.topic)
|
|
||||||
s.removeFromCache(topic, sub)
|
|
||||||
return nil
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Sublist) removeFromNode(n *node, sub *subscription) (found bool) {
|
|
||||||
if n == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if sub.queue {
|
|
||||||
n.qsubs, found = removeSubFromList(sub, n.qsubs)
|
|
||||||
return found
|
|
||||||
} else {
|
|
||||||
n.psubs, found = removeSubFromList(sub, n.psubs)
|
|
||||||
return found
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Sublist) Match(topic string) *SublistResult {
|
|
||||||
s.RLock()
|
|
||||||
rc, ok := s.cache[topic]
|
|
||||||
s.RUnlock()
|
|
||||||
|
|
||||||
if ok {
|
|
||||||
return rc
|
|
||||||
}
|
|
||||||
|
|
||||||
tokens, err := PublishTopicCheckAndSpilt(topic)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("\tserver/sublist.go: ", err)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
result := &SublistResult{}
|
|
||||||
|
|
||||||
s.Lock()
|
|
||||||
l := s.root
|
|
||||||
if len(tokens) > 0 {
|
|
||||||
if tokens[0] == "/" {
|
|
||||||
if _, exist := l.nodes["#"]; exist {
|
|
||||||
addNodeToResults(l.nodes["#"], result)
|
|
||||||
}
|
|
||||||
if _, exist := l.nodes["+"]; exist {
|
|
||||||
matchLevel(l.nodes["/"].next, tokens[1:], result)
|
|
||||||
}
|
|
||||||
if _, exist := l.nodes["/"]; exist {
|
|
||||||
matchLevel(l.nodes["/"].next, tokens[1:], result)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
matchLevel(s.root, tokens, result)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
s.cache[topic] = result
|
|
||||||
if len(s.cache) > 1024 {
|
|
||||||
for k := range s.cache {
|
|
||||||
delete(s.cache, k)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
s.Unlock()
|
|
||||||
// log.Info("SublistResult: ", result)
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
func matchLevel(l *level, toks []string, results *SublistResult) {
|
|
||||||
var swc, n *node
|
|
||||||
exist := false
|
|
||||||
for i, t := range toks {
|
|
||||||
if l == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, exist = l.nodes["#"]; exist {
|
|
||||||
addNodeToResults(l.nodes["#"], results)
|
|
||||||
}
|
|
||||||
if t != "/" {
|
|
||||||
if swc, exist = l.nodes["+"]; exist {
|
|
||||||
matchLevel(l.nodes["+"].next, toks[i+1:], results)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if _, exist = l.nodes["+"]; exist {
|
|
||||||
addNodeToResults(l.nodes["+"], results)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
n = l.nodes[t]
|
|
||||||
if n != nil {
|
|
||||||
l = n.next
|
|
||||||
} else {
|
|
||||||
l = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if n != nil {
|
|
||||||
addNodeToResults(n, results)
|
|
||||||
}
|
|
||||||
if swc != nil {
|
|
||||||
addNodeToResults(n, results)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// This will add in a node's results to the total results.
|
|
||||||
func addNodeToResults(n *node, results *SublistResult) {
|
|
||||||
results.psubs = append(results.psubs, n.psubs...)
|
|
||||||
results.qsubs = append(results.qsubs, n.qsubs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func removeSubFromList(sub *subscription, sl []*subscription) ([]*subscription, bool) {
|
|
||||||
for i := 0; i < len(sl); i++ {
|
|
||||||
if sl[i] == sub {
|
|
||||||
last := len(sl) - 1
|
|
||||||
sl[i] = sl[last]
|
|
||||||
sl[last] = nil
|
|
||||||
sl = sl[:last]
|
|
||||||
// log.Info("removeSubFromList success")
|
|
||||||
return shrinkAsNeeded(sl), true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return sl, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Checks if we need to do a resize. This is for very large growth then
|
|
||||||
// subsequent return to a more normal size from unsubscribe.
|
|
||||||
func shrinkAsNeeded(sl []*subscription) []*subscription {
|
|
||||||
lsl := len(sl)
|
|
||||||
csl := cap(sl)
|
|
||||||
// Don't bother if list not too big
|
|
||||||
if csl <= 8 {
|
|
||||||
return sl
|
|
||||||
}
|
|
||||||
pFree := float32(csl-lsl) / float32(csl)
|
|
||||||
if pFree > 0.50 {
|
|
||||||
return append([]*subscription(nil), sl...)
|
|
||||||
}
|
|
||||||
return sl
|
|
||||||
}
|
|
||||||
24
broker/usage.go
Normal file
24
broker/usage.go
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
package broker
|
||||||
|
|
||||||
|
// usageStr is the help text printed for -h/--help.
// Typo fixes: "perfer" -> "prefer", "who maintenance" -> "which maintains".
var usageStr = `
Usage: hmq [options]

Broker Options:
	-w,  --worker <number>            Worker num to process message, prefer (client num)/10. (default 1024)
	-p,  --port <port>                Use port for clients (default: 1883)
	     --host <host>                Network host to listen on. (default "0.0.0.0")
	-ws, --wsport <port>              Use port for websocket monitoring
	-wsp,--wspath <path>              Use path for websocket monitoring
	-c,  --config <file>              Configuration file

Logging Options:
	-d, --debug <bool>                Enable debugging output (default false)
	-D                                Debug and trace

Cluster Options:
	-r,  --router  <rurl>             Router which maintains cluster info
	-cp, --clusterport <cluster-port> Cluster listen port for others

Common Options:
	-h, --help                        Show this message
`
|
||||||
@@ -1,37 +0,0 @@
|
|||||||
package broker
|
|
||||||
|
|
||||||
type Worker struct {
|
|
||||||
WorkerPool chan chan *Message
|
|
||||||
MsgChannel chan *Message
|
|
||||||
quit chan bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewWorker(workerPool chan chan *Message) Worker {
|
|
||||||
return Worker{
|
|
||||||
WorkerPool: workerPool,
|
|
||||||
MsgChannel: make(chan *Message),
|
|
||||||
quit: make(chan bool)}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w Worker) Start() {
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
// register the current worker into the worker queue.
|
|
||||||
w.WorkerPool <- w.MsgChannel
|
|
||||||
select {
|
|
||||||
case msg := <-w.MsgChannel:
|
|
||||||
// we have received a work request.
|
|
||||||
ProcessMessage(msg)
|
|
||||||
case <-w.quit:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stop signals the worker to stop listening for work requests.
|
|
||||||
func (w Worker) Stop() {
|
|
||||||
go func() {
|
|
||||||
w.quit <- true
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
@@ -4,9 +4,9 @@
|
|||||||
"host": "0.0.0.0",
|
"host": "0.0.0.0",
|
||||||
"cluster": {
|
"cluster": {
|
||||||
"host": "0.0.0.0",
|
"host": "0.0.0.0",
|
||||||
"port": "1993",
|
"port": "1993"
|
||||||
"routes": []
|
|
||||||
},
|
},
|
||||||
|
"router": "127.0.0.1:9888",
|
||||||
"tlsPort": "8883",
|
"tlsPort": "8883",
|
||||||
"tlsHost": "0.0.0.0",
|
"tlsHost": "0.0.0.0",
|
||||||
"wsPort": "1888",
|
"wsPort": "1888",
|
||||||
@@ -1,3 +1,5 @@
|
|||||||
|
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
|
||||||
|
*/
|
||||||
package acl
|
package acl
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>*/
|
||||||
package acl
|
package acl
|
||||||
|
|
||||||
import "strings"
|
import "strings"
|
||||||
|
|||||||
@@ -1,3 +1,5 @@
|
|||||||
|
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
|
||||||
|
*/
|
||||||
package acl
|
package acl
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|||||||
76
lib/sessions/memprovider.go
Normal file
76
lib/sessions/memprovider.go
Normal file
@@ -0,0 +1,76 @@
|
|||||||
|
// Copyright (c) 2014 The SurgeMQ Authors. All rights reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package sessions
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ SessionsProvider = (*memProvider)(nil)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
Register("mem", NewMemProvider())
|
||||||
|
}
|
||||||
|
|
||||||
|
type memProvider struct {
|
||||||
|
st map[string]*Session
|
||||||
|
mu sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMemProvider() *memProvider {
|
||||||
|
return &memProvider{
|
||||||
|
st: make(map[string]*Session),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *memProvider) New(id string) (*Session, error) {
|
||||||
|
this.mu.Lock()
|
||||||
|
defer this.mu.Unlock()
|
||||||
|
|
||||||
|
this.st[id] = &Session{id: id}
|
||||||
|
return this.st[id], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *memProvider) Get(id string) (*Session, error) {
|
||||||
|
this.mu.RLock()
|
||||||
|
defer this.mu.RUnlock()
|
||||||
|
|
||||||
|
sess, ok := this.st[id]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("store/Get: No session found for key %s", id)
|
||||||
|
}
|
||||||
|
|
||||||
|
return sess, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *memProvider) Del(id string) {
|
||||||
|
this.mu.Lock()
|
||||||
|
defer this.mu.Unlock()
|
||||||
|
delete(this.st, id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *memProvider) Save(id string) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *memProvider) Count() int {
|
||||||
|
return len(this.st)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *memProvider) Close() error {
|
||||||
|
this.st = make(map[string]*Session)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
95
lib/sessions/redisprovider.go
Normal file
95
lib/sessions/redisprovider.go
Normal file
@@ -0,0 +1,95 @@
|
|||||||
|
package sessions
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
log "github.com/cihub/seelog"
|
||||||
|
"github.com/go-redis/redis"
|
||||||
|
jsoniter "github.com/json-iterator/go"
|
||||||
|
)
|
||||||
|
|
||||||
|
var redisClient *redis.Client
|
||||||
|
var _ SessionsProvider = (*redisProvider)(nil)
|
||||||
|
|
||||||
|
const (
|
||||||
|
sessionName = "session"
|
||||||
|
)
|
||||||
|
|
||||||
|
type redisProvider struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
Register("redis", NewRedisProvider())
|
||||||
|
}
|
||||||
|
|
||||||
|
func InitRedisConn(url string) {
|
||||||
|
redisClient = redis.NewClient(&redis.Options{
|
||||||
|
Addr: "127.0.0.1:6379",
|
||||||
|
Password: "", // no password set
|
||||||
|
DB: 0, // use default DB
|
||||||
|
})
|
||||||
|
err := redisClient.Ping().Err()
|
||||||
|
for err != nil {
|
||||||
|
log.Error("connect redis error: ", err, " 3s try again...")
|
||||||
|
time.Sleep(3 * time.Second)
|
||||||
|
err = redisClient.Ping().Err()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewRedisProvider() *redisProvider {
|
||||||
|
return &redisProvider{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *redisProvider) New(id string) (*Session, error) {
|
||||||
|
val, _ := jsoniter.Marshal(&Session{id: id})
|
||||||
|
|
||||||
|
err := redisClient.HSet(sessionName, id, val).Err()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
result, err := redisClient.HGet(sessionName, id).Bytes()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
sess := Session{}
|
||||||
|
err = jsoniter.Unmarshal(result, &sess)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &sess, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *redisProvider) Get(id string) (*Session, error) {
|
||||||
|
|
||||||
|
result, err := redisClient.HGet(sessionName, id).Bytes()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
sess := Session{}
|
||||||
|
err = jsoniter.Unmarshal(result, &sess)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &sess, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *redisProvider) Del(id string) {
|
||||||
|
redisClient.HDel(sessionName, id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *redisProvider) Save(id string) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *redisProvider) Count() int {
|
||||||
|
return int(redisClient.HLen(sessionName).Val())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *redisProvider) Close() error {
|
||||||
|
return redisClient.Del(sessionName).Err()
|
||||||
|
}
|
||||||
149
lib/sessions/session.go
Normal file
149
lib/sessions/session.go
Normal file
@@ -0,0 +1,149 @@
|
|||||||
|
package sessions
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Queue size for the ack queue
|
||||||
|
defaultQueueSize = 16
|
||||||
|
)
|
||||||
|
|
||||||
|
type Session struct {
|
||||||
|
|
||||||
|
// cmsg is the CONNECT message
|
||||||
|
cmsg *packets.ConnectPacket
|
||||||
|
|
||||||
|
// Will message to publish if connect is closed unexpectedly
|
||||||
|
Will *packets.PublishPacket
|
||||||
|
|
||||||
|
// Retained publish message
|
||||||
|
Retained *packets.PublishPacket
|
||||||
|
|
||||||
|
// topics stores all the topis for this session/client
|
||||||
|
topics map[string]byte
|
||||||
|
|
||||||
|
// Initialized?
|
||||||
|
initted bool
|
||||||
|
|
||||||
|
// Serialize access to this session
|
||||||
|
mu sync.Mutex
|
||||||
|
|
||||||
|
id string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Session) Init(msg *packets.ConnectPacket) error {
|
||||||
|
this.mu.Lock()
|
||||||
|
defer this.mu.Unlock()
|
||||||
|
|
||||||
|
if this.initted {
|
||||||
|
return fmt.Errorf("Session already initialized")
|
||||||
|
}
|
||||||
|
|
||||||
|
this.cmsg = msg
|
||||||
|
|
||||||
|
if this.cmsg.WillFlag {
|
||||||
|
this.Will = packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||||
|
this.Will.Qos = this.cmsg.Qos
|
||||||
|
this.Will.TopicName = this.cmsg.WillTopic
|
||||||
|
this.Will.Payload = this.cmsg.WillMessage
|
||||||
|
this.Will.Retain = this.cmsg.WillRetain
|
||||||
|
}
|
||||||
|
|
||||||
|
this.topics = make(map[string]byte, 1)
|
||||||
|
|
||||||
|
this.id = string(msg.ClientIdentifier)
|
||||||
|
|
||||||
|
this.initted = true
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Session) Update(msg *packets.ConnectPacket) error {
|
||||||
|
this.mu.Lock()
|
||||||
|
defer this.mu.Unlock()
|
||||||
|
|
||||||
|
this.cmsg = msg
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Session) RetainMessage(msg *packets.PublishPacket) error {
|
||||||
|
this.mu.Lock()
|
||||||
|
defer this.mu.Unlock()
|
||||||
|
|
||||||
|
this.Retained = msg
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Session) AddTopic(topic string, qos byte) error {
|
||||||
|
this.mu.Lock()
|
||||||
|
defer this.mu.Unlock()
|
||||||
|
|
||||||
|
if !this.initted {
|
||||||
|
return fmt.Errorf("Session not yet initialized")
|
||||||
|
}
|
||||||
|
|
||||||
|
this.topics[topic] = qos
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Session) RemoveTopic(topic string) error {
|
||||||
|
this.mu.Lock()
|
||||||
|
defer this.mu.Unlock()
|
||||||
|
|
||||||
|
if !this.initted {
|
||||||
|
return fmt.Errorf("Session not yet initialized")
|
||||||
|
}
|
||||||
|
|
||||||
|
delete(this.topics, topic)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Session) Topics() ([]string, []byte, error) {
|
||||||
|
this.mu.Lock()
|
||||||
|
defer this.mu.Unlock()
|
||||||
|
|
||||||
|
if !this.initted {
|
||||||
|
return nil, nil, fmt.Errorf("Session not yet initialized")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
topics []string
|
||||||
|
qoss []byte
|
||||||
|
)
|
||||||
|
|
||||||
|
for k, v := range this.topics {
|
||||||
|
topics = append(topics, k)
|
||||||
|
qoss = append(qoss, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
return topics, qoss, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Session) ID() string {
|
||||||
|
return this.cmsg.ClientIdentifier
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Session) WillFlag() bool {
|
||||||
|
this.mu.Lock()
|
||||||
|
defer this.mu.Unlock()
|
||||||
|
return this.cmsg.WillFlag
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Session) SetWillFlag(v bool) {
|
||||||
|
this.mu.Lock()
|
||||||
|
defer this.mu.Unlock()
|
||||||
|
this.cmsg.WillFlag = v
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Session) CleanSession() bool {
|
||||||
|
this.mu.Lock()
|
||||||
|
defer this.mu.Unlock()
|
||||||
|
return this.cmsg.CleanSession
|
||||||
|
}
|
||||||
92
lib/sessions/sessions.go
Normal file
92
lib/sessions/sessions.go
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
package sessions
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"encoding/base64"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrSessionsProviderNotFound = errors.New("Session: Session provider not found")
|
||||||
|
ErrKeyNotAvailable = errors.New("Session: not item found for key.")
|
||||||
|
|
||||||
|
providers = make(map[string]SessionsProvider)
|
||||||
|
)
|
||||||
|
|
||||||
|
type SessionsProvider interface {
|
||||||
|
New(id string) (*Session, error)
|
||||||
|
Get(id string) (*Session, error)
|
||||||
|
Del(id string)
|
||||||
|
Save(id string) error
|
||||||
|
Count() int
|
||||||
|
Close() error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register makes a session provider available by the provided name.
|
||||||
|
// If a Register is called twice with the same name or if the driver is nil,
|
||||||
|
// it panics.
|
||||||
|
func Register(name string, provider SessionsProvider) {
|
||||||
|
if provider == nil {
|
||||||
|
panic("session: Register provide is nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, dup := providers[name]; dup {
|
||||||
|
panic("session: Register called twice for provider " + name)
|
||||||
|
}
|
||||||
|
|
||||||
|
providers[name] = provider
|
||||||
|
}
|
||||||
|
|
||||||
|
func Unregister(name string) {
|
||||||
|
delete(providers, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
type Manager struct {
|
||||||
|
p SessionsProvider
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewManager(providerName string) (*Manager, error) {
|
||||||
|
p, ok := providers[providerName]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("session: unknown provider %q", providerName)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Manager{p: p}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Manager) New(id string) (*Session, error) {
|
||||||
|
if id == "" {
|
||||||
|
id = this.sessionId()
|
||||||
|
}
|
||||||
|
return this.p.New(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Manager) Get(id string) (*Session, error) {
|
||||||
|
return this.p.Get(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Manager) Del(id string) {
|
||||||
|
this.p.Del(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Manager) Save(id string) error {
|
||||||
|
return this.p.Save(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Manager) Count() int {
|
||||||
|
return this.p.Count()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Manager) Close() error {
|
||||||
|
return this.p.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (manager *Manager) sessionId() string {
|
||||||
|
b := make([]byte, 15)
|
||||||
|
if _, err := io.ReadFull(rand.Reader, b); err != nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return base64.URLEncoding.EncodeToString(b)
|
||||||
|
}
|
||||||
549
lib/topics/memtopics.go
Normal file
549
lib/topics/memtopics.go
Normal file
@@ -0,0 +1,549 @@
|
|||||||
|
package topics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// The three MQTT delivery guarantees, in protocol order.
	QosAtMostOnce byte = iota
	QosAtLeastOnce
	QosExactlyOnce
	// QosFailure is the SUBACK return code for a rejected subscription.
	QosFailure = 0x80
)

// Compile-time check that memTopics satisfies TopicsProvider.
var _ TopicsProvider = (*memTopics)(nil)

// memTopics holds the in-memory subscription trie and retained-message
// trie, each guarded by its own RWMutex.
type memTopics struct {
	// Sub/unsub mutex
	smu sync.RWMutex
	// Subscription tree
	sroot *snode

	// Retained message mutex
	rmu sync.RWMutex
	// Retained messages topic tree
	rroot *rnode
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
Register("mem", NewMemProvider())
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMemProvider returns an new instance of the memTopics, which is implements the
|
||||||
|
// TopicsProvider interface. memProvider is a hidden struct that stores the topic
|
||||||
|
// subscriptions and retained messages in memory. The content is not persistend so
|
||||||
|
// when the server goes, everything will be gone. Use with care.
|
||||||
|
func NewMemProvider() *memTopics {
|
||||||
|
return &memTopics{
|
||||||
|
sroot: newSNode(),
|
||||||
|
rroot: newRNode(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ValidQos(qos byte) bool {
|
||||||
|
return qos == QosAtMostOnce || qos == QosAtLeastOnce || qos == QosExactlyOnce
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *memTopics) Subscribe(topic []byte, qos byte, sub interface{}) (byte, error) {
|
||||||
|
if !ValidQos(qos) {
|
||||||
|
return QosFailure, fmt.Errorf("Invalid QoS %d", qos)
|
||||||
|
}
|
||||||
|
|
||||||
|
if sub == nil {
|
||||||
|
return QosFailure, fmt.Errorf("Subscriber cannot be nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
this.smu.Lock()
|
||||||
|
defer this.smu.Unlock()
|
||||||
|
|
||||||
|
if qos > QosExactlyOnce {
|
||||||
|
qos = QosExactlyOnce
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := this.sroot.sinsert(topic, qos, sub); err != nil {
|
||||||
|
return QosFailure, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return qos, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *memTopics) Unsubscribe(topic []byte, sub interface{}) error {
|
||||||
|
this.smu.Lock()
|
||||||
|
defer this.smu.Unlock()
|
||||||
|
|
||||||
|
return this.sroot.sremove(topic, sub)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returned values will be invalidated by the next Subscribers call
|
||||||
|
func (this *memTopics) Subscribers(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error {
|
||||||
|
if !ValidQos(qos) {
|
||||||
|
return fmt.Errorf("Invalid QoS %d", qos)
|
||||||
|
}
|
||||||
|
|
||||||
|
this.smu.RLock()
|
||||||
|
defer this.smu.RUnlock()
|
||||||
|
|
||||||
|
*subs = (*subs)[0:0]
|
||||||
|
*qoss = (*qoss)[0:0]
|
||||||
|
|
||||||
|
return this.sroot.smatch(topic, qos, subs, qoss)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *memTopics) Retain(msg *packets.PublishPacket) error {
|
||||||
|
this.rmu.Lock()
|
||||||
|
defer this.rmu.Unlock()
|
||||||
|
|
||||||
|
// So apparently, at least according to the MQTT Conformance/Interoperability
|
||||||
|
// Testing, that a payload of 0 means delete the retain message.
|
||||||
|
// https://eclipse.org/paho/clients/testing/
|
||||||
|
if len(msg.Payload) == 0 {
|
||||||
|
return this.rroot.rremove([]byte(msg.TopicName))
|
||||||
|
}
|
||||||
|
|
||||||
|
return this.rroot.rinsert([]byte(msg.TopicName), msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *memTopics) Retained(topic []byte, msgs *[]*packets.PublishPacket) error {
|
||||||
|
this.rmu.RLock()
|
||||||
|
defer this.rmu.RUnlock()
|
||||||
|
|
||||||
|
return this.rroot.rmatch(topic, msgs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *memTopics) Close() error {
|
||||||
|
this.sroot = nil
|
||||||
|
this.rroot = nil
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// subscription nodes
//
// snode is one level of the subscription trie: the subscribers that
// terminate exactly here, plus child nodes for deeper levels.
type snode struct {
	// If this is the end of the topic string, then add subscribers here
	subs []interface{}
	qos  []byte

	// Otherwise add the next topic level here
	snodes map[string]*snode
}

// newSNode creates an empty subscription trie node.
func newSNode() *snode {
	return &snode{
		snodes: make(map[string]*snode),
	}
}

// sinsert adds sub (with its granted qos) under the remaining topic
// bytes, creating child nodes as needed. An already-present equal
// subscriber only has its QoS updated.
func (this *snode) sinsert(topic []byte, qos byte, sub interface{}) error {
	// If there's no more topic levels, that means we are at the matching snode
	// to insert the subscriber. So let's see if there's such subscriber,
	// if so, update it. Otherwise insert it.
	if len(topic) == 0 {
		// Let's see if the subscriber is already on the list. If yes, update
		// QoS and then return.
		for i := range this.subs {
			if equal(this.subs[i], sub) {
				this.qos[i] = qos
				return nil
			}
		}

		// Otherwise add.
		this.subs = append(this.subs, sub)
		this.qos = append(this.qos, qos)

		return nil
	}

	// Not the last level, so let's find or create the next level snode, and
	// recursively call it's insert().

	// ntl = next topic level
	ntl, rem, err := nextTopicLevel(topic)
	if err != nil {
		return err
	}

	level := string(ntl)

	// Add snode if it doesn't already exist
	n, ok := this.snodes[level]
	if !ok {
		n = newSNode()
		this.snodes[level] = n
	}

	return n.sinsert(rem, qos, sub)
}
|
||||||
|
|
||||||
|
// This remove implementation ignores the QoS, as long as the subscriber
// matches then it's removed.
//
// sremove walks the trie along the topic and removes sub at the leaf; a
// nil sub removes ALL subscribers there. Empty child nodes are pruned on
// the way back up.
func (this *snode) sremove(topic []byte, sub interface{}) error {
	// If the topic is empty, it means we are at the final matching snode. If so,
	// let's find the matching subscribers and remove them.
	if len(topic) == 0 {
		// If subscriber == nil, then it's signal to remove ALL subscribers
		if sub == nil {
			this.subs = this.subs[0:0]
			this.qos = this.qos[0:0]
			return nil
		}

		// If we find the subscriber then remove it from the list. Technically
		// we just overwrite the slot by shifting all other items up by one.
		for i := range this.subs {
			if equal(this.subs[i], sub) {
				this.subs = append(this.subs[:i], this.subs[i+1:]...)
				this.qos = append(this.qos[:i], this.qos[i+1:]...)
				return nil
			}
		}

		return fmt.Errorf("No topic found for subscriber")
	}

	// Not the last level, so let's find the next level snode, and recursively
	// call it's remove().

	// ntl = next topic level
	ntl, rem, err := nextTopicLevel(topic)
	if err != nil {
		return err
	}

	level := string(ntl)

	// Find the snode that matches the topic level
	n, ok := this.snodes[level]
	if !ok {
		return fmt.Errorf("No topic found")
	}

	// Remove the subscriber from the next level snode
	if err := n.sremove(rem, sub); err != nil {
		return err
	}

	// If there are no more subscribers and snodes to the next level we just visited
	// let's remove it
	if len(n.subs) == 0 && len(n.snodes) == 0 {
		delete(this.snodes, level)
	}

	return nil
}

// smatch() returns all the subscribers that are subscribed to the topic. Given a topic
// with no wildcards (publish topic), it returns a list of subscribers that subscribes
// to the topic. For each of the level names, it's a match
// - if there are subscribers to '#', then all the subscribers are added to result set
func (this *snode) smatch(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error {
	// If the topic is empty, it means we are at the final matching snode. If so,
	// let's find the subscribers that match the qos and append them to the list.
	if len(topic) == 0 {
		this.matchQos(qos, subs, qoss)
		return nil
	}

	// ntl = next topic level
	ntl, rem, err := nextTopicLevel(topic)
	if err != nil {
		return err
	}

	level := string(ntl)

	for k, n := range this.snodes {
		// If the key is "#", then these subscribers are added to the result set
		if k == MWC {
			n.matchQos(qos, subs, qoss)
		} else if k == SWC || k == level {
			if err := n.smatch(rem, qos, subs, qoss); err != nil {
				return err
			}
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// retained message nodes
|
||||||
|
type rnode struct {
|
||||||
|
// If this is the end of the topic string, then add retained messages here
|
||||||
|
msg *packets.PublishPacket
|
||||||
|
// Otherwise add the next topic level here
|
||||||
|
rnodes map[string]*rnode
|
||||||
|
}
|
||||||
|
|
||||||
|
func newRNode() *rnode {
|
||||||
|
return &rnode{
|
||||||
|
rnodes: make(map[string]*rnode),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *rnode) rinsert(topic []byte, msg *packets.PublishPacket) error {
|
||||||
|
// If there's no more topic levels, that means we are at the matching rnode.
|
||||||
|
if len(topic) == 0 {
|
||||||
|
// Reuse the message if possible
|
||||||
|
if this.msg == nil {
|
||||||
|
this.msg = msg
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not the last level, so let's find or create the next level snode, and
|
||||||
|
// recursively call it's insert().
|
||||||
|
|
||||||
|
// ntl = next topic level
|
||||||
|
ntl, rem, err := nextTopicLevel(topic)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
level := string(ntl)
|
||||||
|
|
||||||
|
// Add snode if it doesn't already exist
|
||||||
|
n, ok := this.rnodes[level]
|
||||||
|
if !ok {
|
||||||
|
n = newRNode()
|
||||||
|
this.rnodes[level] = n
|
||||||
|
}
|
||||||
|
|
||||||
|
return n.rinsert(rem, msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove the retained message for the supplied topic
|
||||||
|
func (this *rnode) rremove(topic []byte) error {
|
||||||
|
// If the topic is empty, it means we are at the final matching rnode. If so,
|
||||||
|
// let's remove the buffer and message.
|
||||||
|
if len(topic) == 0 {
|
||||||
|
this.msg = nil
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not the last level, so let's find the next level rnode, and recursively
|
||||||
|
// call it's remove().
|
||||||
|
|
||||||
|
// ntl = next topic level
|
||||||
|
ntl, rem, err := nextTopicLevel(topic)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
level := string(ntl)
|
||||||
|
|
||||||
|
// Find the rnode that matches the topic level
|
||||||
|
n, ok := this.rnodes[level]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("No topic found")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove the subscriber from the next level rnode
|
||||||
|
if err := n.rremove(rem); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If there are no more rnodes to the next level we just visited let's remove it
|
||||||
|
if len(n.rnodes) == 0 {
|
||||||
|
delete(this.rnodes, level)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// rmatch() finds the retained messages for the topic and qos provided. It's somewhat
|
||||||
|
// of a reverse match compare to match() since the supplied topic can contain
|
||||||
|
// wildcards, whereas the retained message topic is a full (no wildcard) topic.
|
||||||
|
func (this *rnode) rmatch(topic []byte, msgs *[]*packets.PublishPacket) error {
|
||||||
|
// If the topic is empty, it means we are at the final matching rnode. If so,
|
||||||
|
// add the retained msg to the list.
|
||||||
|
if len(topic) == 0 {
|
||||||
|
if this.msg != nil {
|
||||||
|
*msgs = append(*msgs, this.msg)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ntl = next topic level
|
||||||
|
ntl, rem, err := nextTopicLevel(topic)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
level := string(ntl)
|
||||||
|
|
||||||
|
if level == MWC {
|
||||||
|
// If '#', add all retained messages starting this node
|
||||||
|
this.allRetained(msgs)
|
||||||
|
} else if level == SWC {
|
||||||
|
// If '+', check all nodes at this level. Next levels must be matched.
|
||||||
|
for _, n := range this.rnodes {
|
||||||
|
if err := n.rmatch(rem, msgs); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Otherwise, find the matching node, go to the next level
|
||||||
|
if n, ok := this.rnodes[level]; ok {
|
||||||
|
if err := n.rmatch(rem, msgs); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *rnode) allRetained(msgs *[]*packets.PublishPacket) {
|
||||||
|
if this.msg != nil {
|
||||||
|
*msgs = append(*msgs, this.msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, n := range this.rnodes {
|
||||||
|
n.allRetained(msgs)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scanner states used by nextTopicLevel while walking a topic byte-by-byte.
const (
	stateCHR byte = iota // Regular character
	stateMWC             // Multi-level wildcard
	stateSWC             // Single-level wildcard
	stateSEP             // Topic level separator
	stateSYS             // System level topic ($)
)
|
||||||
|
|
||||||
|
// Returns topic level, remaining topic levels and any errors
|
||||||
|
func nextTopicLevel(topic []byte) ([]byte, []byte, error) {
|
||||||
|
s := stateCHR
|
||||||
|
|
||||||
|
for i, c := range topic {
|
||||||
|
switch c {
|
||||||
|
case '/':
|
||||||
|
if s == stateMWC {
|
||||||
|
return nil, nil, fmt.Errorf("Multi-level wildcard found in topic and it's not at the last level")
|
||||||
|
}
|
||||||
|
|
||||||
|
if i == 0 {
|
||||||
|
return []byte(SWC), topic[i+1:], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return topic[:i], topic[i+1:], nil
|
||||||
|
|
||||||
|
case '#':
|
||||||
|
if i != 0 {
|
||||||
|
return nil, nil, fmt.Errorf("Wildcard character '#' must occupy entire topic level")
|
||||||
|
}
|
||||||
|
|
||||||
|
s = stateMWC
|
||||||
|
|
||||||
|
case '+':
|
||||||
|
if i != 0 {
|
||||||
|
return nil, nil, fmt.Errorf("Wildcard character '+' must occupy entire topic level")
|
||||||
|
}
|
||||||
|
|
||||||
|
s = stateSWC
|
||||||
|
|
||||||
|
// case '$':
|
||||||
|
// if i == 0 {
|
||||||
|
// return nil, nil, fmt.Errorf("Cannot publish to $ topics")
|
||||||
|
// }
|
||||||
|
|
||||||
|
// s = stateSYS
|
||||||
|
|
||||||
|
default:
|
||||||
|
if s == stateMWC || s == stateSWC {
|
||||||
|
return nil, nil, fmt.Errorf("Wildcard characters '#' and '+' must occupy entire topic level")
|
||||||
|
}
|
||||||
|
|
||||||
|
s = stateCHR
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we got here that means we didn't hit the separator along the way, so the
|
||||||
|
// topic is either empty, or does not contain a separator. Either way, we return
|
||||||
|
// the full topic
|
||||||
|
return topic, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// matchQos appends every subscriber of this node to subs, and the published
// QoS to qoss, one entry per subscriber.
//
// Per MQTT, the delivery QoS should be the minimum of the published QoS and
// the QoS granted to the subscriber, and subscribers granted a lower QoS
// would be filtered out entirely under the commented-out check below.
//
// NOTE(review): that filtering is disabled here, so every subscriber
// currently receives the published QoS unchanged — confirm this is
// intentional before re-enabling the check.
func (this *snode) matchQos(qos byte, subs *[]interface{}, qoss *[]byte) {
	for _, sub := range this.subs {
		// Granted-QoS filtering intentionally disabled:
		// if qos >= this.qos[i] {
		*subs = append(*subs, sub)
		*qoss = append(*qoss, qos)
		// }
	}
}
|
||||||
|
|
||||||
|
// equal reports whether two subscriber keys are equal. Values of different
// dynamic types are never equal. Function values are compared by their code
// pointer; everything else uses Go interface equality.
//
// NOTE: comparing an uncomparable non-func type (slice, map) still panics,
// exactly as the original implementation did at its `k1 == k2` comparison.
func equal(k1, k2 interface{}) bool {
	if reflect.TypeOf(k1) != reflect.TypeOf(k2) {
		return false
	}

	if reflect.ValueOf(k1).Kind() == reflect.Func {
		// BUG FIX: the original returned `&k1 == &k2`, the addresses of the
		// two local parameters, which is always false — so a func subscriber
		// could never be matched again (e.g. on unsubscribe). Compare the
		// function code pointers instead.
		return reflect.ValueOf(k1).Pointer() == reflect.ValueOf(k2).Pointer()
	}

	// Same dynamic type here, so plain interface equality covers every
	// comparable kind; the original's per-type switch after this comparison
	// could only ever return false and has been removed as dead code.
	return k1 == k2
}
|
||||||
91
lib/topics/topics.go
Normal file
91
lib/topics/topics.go
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
package topics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// MWC is the multi-level wildcard
	MWC = "#"

	// SWC is the single level wildcard
	SWC = "+"

	// SEP is the topic level separator
	SEP = "/"

	// SYS is the starting character of the system level topics
	SYS = "$"

	// Both wildcards combined, for contains-any style checks.
	_WC = "#+"
)

var (
	// providers is the registry of named TopicsProvider implementations,
	// populated via Register (typically from provider init functions).
	providers = make(map[string]TopicsProvider)
)

// TopicsProvider is the interface a topic-tree backend must implement:
// subscription management, subscriber lookup for a publish topic, and
// retained-message storage and retrieval.
type TopicsProvider interface {
	Subscribe(topic []byte, qos byte, subscriber interface{}) (byte, error)
	Unsubscribe(topic []byte, subscriber interface{}) error
	Subscribers(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error
	Retain(msg *packets.PublishPacket) error
	Retained(topic []byte, msgs *[]*packets.PublishPacket) error
	Close() error
}
|
||||||
|
|
||||||
|
func Register(name string, provider TopicsProvider) {
|
||||||
|
if provider == nil {
|
||||||
|
panic("topics: Register provide is nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, dup := providers[name]; dup {
|
||||||
|
panic("topics: Register called twice for provider " + name)
|
||||||
|
}
|
||||||
|
|
||||||
|
providers[name] = provider
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unregister removes the named provider from the registry. It is a no-op if
// no provider was registered under that name.
func Unregister(name string) {
	delete(providers, name)
}
|
||||||
|
|
||||||
|
type Manager struct {
|
||||||
|
p TopicsProvider
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewManager(providerName string) (*Manager, error) {
|
||||||
|
p, ok := providers[providerName]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("session: unknown provider %q", providerName)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Manager{p: p}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Manager) Subscribe(topic []byte, qos byte, subscriber interface{}) (byte, error) {
|
||||||
|
return this.p.Subscribe(topic, qos, subscriber)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Manager) Unsubscribe(topic []byte, subscriber interface{}) error {
|
||||||
|
return this.p.Unsubscribe(topic, subscriber)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Manager) Subscribers(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error {
|
||||||
|
return this.p.Subscribers(topic, qos, subs, qoss)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Manager) Retain(msg *packets.PublishPacket) error {
|
||||||
|
return this.p.Retain(msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Manager) Retained(topic []byte, msgs *[]*packets.PublishPacket) error {
|
||||||
|
return this.p.Retained(topic, msgs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Manager) Close() error {
|
||||||
|
return this.p.Close()
|
||||||
|
}
|
||||||
50
logger/logger.go
Normal file
50
logger/logger.go
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
|
||||||
|
*/
|
||||||
|
|
||||||
|
package logger
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// env can be setup at build time with Go Linker. Value could be prod or whatever else for dev env
|
||||||
|
instance *zap.Logger
|
||||||
|
logCfg zap.Config
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewDevLogger return a logger for dev builds
|
||||||
|
func NewDevLogger() (*zap.Logger, error) {
|
||||||
|
logCfg := zap.NewDevelopmentConfig()
|
||||||
|
return logCfg.Build()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewProdLogger return a logger for production builds
|
||||||
|
func NewProdLogger() (*zap.Logger, error) {
|
||||||
|
logCfg := zap.NewProductionConfig()
|
||||||
|
logCfg.DisableStacktrace = true
|
||||||
|
logCfg.Level = zap.NewAtomicLevelAt(zap.InfoLevel)
|
||||||
|
return logCfg.Build()
|
||||||
|
}
|
||||||
|
|
||||||
|
func InitLogger(debug bool) {
|
||||||
|
var err error
|
||||||
|
var log *zap.Logger
|
||||||
|
if debug {
|
||||||
|
log, err = NewDevLogger()
|
||||||
|
} else {
|
||||||
|
log, err = NewProdLogger()
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
panic("Unable to create a logger.")
|
||||||
|
}
|
||||||
|
defer log.Sync()
|
||||||
|
|
||||||
|
log.Debug("Logger initialization succeeded")
|
||||||
|
instance = log.Named("hmq")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns the shared *zap.Logger created by InitLogger. It returns nil
// until InitLogger has been called.
func Get() *zap.Logger {
	return instance
}
|
||||||
32
logger/logger_test.go
Normal file
32
logger/logger_test.go
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
|
||||||
|
*/
|
||||||
|
package logger
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestGet(t *testing.T) {
|
||||||
|
var l *zap.Logger
|
||||||
|
logger := Get()
|
||||||
|
|
||||||
|
assert.NotNil(t, logger)
|
||||||
|
assert.IsType(t, l, logger)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestNewDevLogger checks that the development logger builds without error
// and has debug-level logging enabled.
func TestNewDevLogger(t *testing.T) {
	logger, err := NewDevLogger()

	assert.Nil(t, err)
	assert.True(t, logger.Core().Enabled(zap.DebugLevel))
}
|
||||||
|
|
||||||
|
// TestNewProdLogger checks that the production logger builds without error
// and has debug-level logging disabled (info level and above only).
func TestNewProdLogger(t *testing.T) {
	logger, err := NewProdLogger()

	assert.Nil(t, err)
	assert.False(t, logger.Core().Enabled(zap.DebugLevel))
}
|
||||||
22
main.go
22
main.go
@@ -1,33 +1,37 @@
|
|||||||
|
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
|
||||||
|
|
||||||
|
Permission to use, copy, modify, and/or distribute this software for any
|
||||||
|
purpose with or without fee is hereby granted, provided that the above
|
||||||
|
copyright notice and this permission notice appear in all copies.
|
||||||
|
*/
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"hmq/broker"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
|
||||||
log "github.com/cihub/seelog"
|
"github.com/fhmq/hmq/broker"
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||||
runtime.GC()
|
config, err := broker.ConfigureConfig(os.Args[1:])
|
||||||
config, er := broker.LoadConfig()
|
if err != nil {
|
||||||
if er != nil {
|
fmt.Println("configure broker config error: ", err)
|
||||||
log.Error("Load Config file error: ", er)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
broker.StartDispatcher()
|
|
||||||
|
|
||||||
b, err := broker.NewBroker(config)
|
b, err := broker.NewBroker(config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("New Broker error: ", er)
|
fmt.Println("New Broker error: ", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
b.Start()
|
b.Start()
|
||||||
|
|
||||||
s := waitForSignal()
|
s := waitForSignal()
|
||||||
log.Infof("signal got: %v ,broker closed.", s)
|
fmt.Println("signal received, broker closed.", s)
|
||||||
}
|
}
|
||||||
|
|
||||||
func waitForSignal() os.Signal {
|
func waitForSignal() os.Signal {
|
||||||
|
|||||||
166
pool/pool.go
Normal file
166
pool/pool.go
Normal file
@@ -0,0 +1,166 @@
|
|||||||
|
package pool
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
const (
	// readyQueueSize is the size of the queue on which workers register
	// their availability to the dispatcher. There may be hundreds of
	// workers, but only a small channel is needed to register some of them.
	readyQueueSize = 16

	// idleTimeoutSec: if the worker pool receives no new work for this many
	// seconds, one idle worker goroutine is stopped.
	idleTimeoutSec = 5
)
|
||||||
|
|
||||||
|
// WorkerPool runs submitted tasks on a bounded set of worker goroutines,
// starting workers on demand and retiring ones that stay idle.
type WorkerPool struct {
	maxWorkers   int              // upper bound on concurrent worker goroutines
	timeout      time.Duration    // idle period after which one ready worker is stopped
	taskQueue    chan func()      // unbuffered queue of submitted tasks
	readyWorkers chan chan func() // idle workers park their task channel here
	stoppedChan  chan struct{}    // closed once dispatch() has fully shut down
}
|
||||||
|
|
||||||
|
func New(maxWorkers int) *WorkerPool {
|
||||||
|
// There must be at least one worker.
|
||||||
|
if maxWorkers < 1 {
|
||||||
|
maxWorkers = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// taskQueue is unbuffered since items are always removed immediately.
|
||||||
|
pool := &WorkerPool{
|
||||||
|
taskQueue: make(chan func()),
|
||||||
|
maxWorkers: maxWorkers,
|
||||||
|
readyWorkers: make(chan chan func(), readyQueueSize),
|
||||||
|
timeout: time.Second * idleTimeoutSec,
|
||||||
|
stoppedChan: make(chan struct{}),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start the task dispatcher.
|
||||||
|
go pool.dispatch()
|
||||||
|
|
||||||
|
return pool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop shuts the pool down: it closes the task queue and blocks until the
// dispatcher has stopped every worker. Already-submitted tasks complete.
//
// NOTE(review): the Stopped() check and the close() are not atomic, so two
// goroutines calling Stop concurrently could both reach close(p.taskQueue)
// and panic on a double close — confirm Stop is only ever called once.
func (p *WorkerPool) Stop() {
	if p.Stopped() {
		return
	}
	close(p.taskQueue)
	<-p.stoppedChan
}
|
||||||
|
|
||||||
|
func (p *WorkerPool) Stopped() bool {
|
||||||
|
select {
|
||||||
|
case <-p.stoppedChan:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *WorkerPool) Submit(task func()) {
|
||||||
|
if task != nil {
|
||||||
|
p.taskQueue <- task
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *WorkerPool) SubmitWait(task func()) {
|
||||||
|
if task == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
doneChan := make(chan struct{})
|
||||||
|
p.taskQueue <- func() {
|
||||||
|
task()
|
||||||
|
close(doneChan)
|
||||||
|
}
|
||||||
|
<-doneChan
|
||||||
|
}
|
||||||
|
|
||||||
|
// dispatch is the pool's scheduler goroutine. It hands each submitted task to
// a ready worker, starting new workers (up to maxWorkers) on demand, and
// retires one idle worker whenever no work arrives within p.timeout. When the
// task queue is closed (by Stop) it stops every remaining worker and then
// closes stoppedChan to release Stop.
func (p *WorkerPool) dispatch() {
	defer close(p.stoppedChan)
	timeout := time.NewTimer(p.timeout)
	var workerCount int
	var task func()
	var ok bool
	var workerTaskChan chan func()
	// startReady is where a freshly started worker registers its task channel
	// before it joins the shared readyWorkers queue.
	startReady := make(chan chan func())
Loop:
	for {
		timeout.Reset(p.timeout)
		select {
		case task, ok = <-p.taskQueue:
			if !ok {
				// Stop() closed the queue; drain workers below.
				break Loop
			}
			// Got a task to do.
			select {
			case workerTaskChan = <-p.readyWorkers:
				// A worker is ready, so give the task to that worker.
				workerTaskChan <- task
			default:
				// No workers ready.
				// Create a new worker, if not at max.
				if workerCount < p.maxWorkers {
					workerCount++
					go func(t func()) {
						startWorker(startReady, p.readyWorkers)
						// Hand the task over once the new worker has
						// registered its channel.
						taskChan := <-startReady
						taskChan <- t
					}(task)
				} else {
					// At capacity: park a goroutine that submits the task to
					// whichever existing worker frees up first.
					go func(t func()) {
						taskChan := <-p.readyWorkers
						taskChan <- t
					}(task)
				}
			}
		case <-timeout.C:
			// Timed out waiting for work to arrive. Kill one ready worker.
			if workerCount > 0 {
				select {
				case workerTaskChan = <-p.readyWorkers:
					// A worker is ready, so stop it by closing its channel.
					close(workerTaskChan)
					workerCount--
				default:
					// No work, but no ready workers either: all are busy.
				}
			}
		}
	}

	// Stop all remaining workers as they become ready.
	for workerCount > 0 {
		workerTaskChan = <-p.readyWorkers
		close(workerTaskChan)
		workerCount--
	}
}
|
||||||
|
|
||||||
|
// startWorker launches a worker goroutine. The worker announces its personal
// task channel once on startReady, then loops: run a task, re-register on
// readyWorkers. Closing the task channel tells the worker to exit.
func startWorker(startReady, readyWorkers chan chan func()) {
	go func() {
		taskChan := make(chan func())

		// First registration goes to startReady so the dispatcher can hand
		// the triggering task straight to this worker.
		startReady <- taskChan

		// range exits when the dispatcher closes taskChan.
		for task := range taskChan {
			task()
			// Done — advertise availability for the next task.
			readyWorkers <- taskChan
		}
	}()
}
|
||||||
Reference in New Issue
Block a user