40 Commits

Author SHA1 Message Date
Marc Magnin
cf77eaf346 remove subscriptions when a client disconnect 2019-01-18 14:38:51 +01:00
Marc Magnin
7c4d7a0c06 simple fix 2019-01-03 21:23:14 +01:00
joyz
2b56664d85 remove no use 2018-12-27 21:22:32 +08:00
joy.zhou
7547ad3bdc Restruct (#34)
* modify

* remove

* modify

* modify

* remove no use

* add online/offline notification

* modify

* format log

* add reference
2018-12-26 14:51:13 +08:00
joy.zhou
84e7fe2490 context (#28) 2018-05-10 13:13:36 +08:00
zhouyuyan
684584b208 fix write logic 2018-04-28 09:37:37 +08:00
zhouyuyan
56fb4a2d54 fix issue 25 2018-04-28 09:08:28 +08:00
joy.zhou
5ed4728575 Wpool (#23)
* pool

* pool

* wpool
2018-04-04 13:49:52 +08:00
zhouyuyan
c0fea6a5ba modify_message_pool 2018-02-24 13:19:43 +08:00
zhouyuyan
47500910e1 fix broker out painc 2018-02-06 11:01:06 +08:00
joy.zhou
0ff20b6ee2 Update README.md 2018-02-03 13:11:53 +08:00
joy.zhou
7155667f6c Pool (#16)
* add pool

* elastic workerpool

* del buf

* modify usage

* modify readme
2018-02-03 12:42:25 +08:00
zhouyuyan
83db82cdcc Merge branch 'master' of https://github.com/fhmq/hmq 2018-01-31 11:00:29 +08:00
zhouyuyan
b3653bcfb1 fix #14 2018-01-31 10:59:59 +08:00
joy.zhou
221d00480e update read.me 2018-01-26 16:29:14 +08:00
zhouyuyan
91733bf91e modify debug log 2018-01-26 15:47:34 +08:00
Marc Magnin
ef252550dc fhmq/hmq#5 added zap logger (#11) 2018-01-26 13:51:36 +08:00
joy.zhou
1058256235 update readme 2018-01-25 19:34:37 +08:00
joy.zhou
5a569f14a3 del debug info
delete debug message body
2018-01-25 19:31:47 +08:00
zhouyuyan
93b21777ff add lisence 2018-01-25 13:47:50 +08:00
zhouyuyan
dcf2934e1b add flag for hmq 2018-01-25 13:11:45 +08:00
joy.zhou
d9e6e216b0 Merge pull request #4 from MarcMagnin/master
fhmq/hmq#2 added full package ref
2018-01-24 18:14:13 +08:00
Marc Magnin
ca3951769a fhmq/hmq#2 added full package ref 2018-01-23 15:29:16 +01:00
zhouyuyan
0439e7ce90 fxi ws conn 2018-01-22 09:30:08 +08:00
zhouyuyan
dc0f2185ab skip self 2018-01-19 13:53:47 +08:00
zhouyuyan
7462afcfb5 modify readme 2018-01-19 13:49:53 +08:00
zhouyuyan
114e6f901e modify cluster 2018-01-19 13:41:17 +08:00
zhouyuyan
0cb51bd37a Merge branch 'master' of https://github.com/fhmq/hmq 2018-01-18 09:18:38 +08:00
zhouyuyan
819b4725f2 modify route 2018-01-18 09:17:48 +08:00
joy.zhou
85bdeccbfc release link
addd down link
2018-01-17 21:39:31 +08:00
zhouyuyan
1339a04b28 modify Dockerfile 2018-01-17 10:11:36 +08:00
zhouyuyan
957329d85c modify Dockerfile 2018-01-17 10:10:04 +08:00
zhouyuyan
7db7edaa17 cluster fix 2018-01-17 09:39:07 +08:00
zhouyuyan
1d6f6a4a71 add cluster 2018-01-16 16:50:10 +08:00
zhouyuyan
123bb7210f move dispatcher 2018-01-02 10:55:28 +08:00
zhouyuyan
9ad6590e83 modify timer 2017-12-28 09:13:20 +08:00
zhouyuyan
516db49db5 modify keep alive 2017-12-27 16:42:38 +08:00
zhouyuyan
a260057bfe modify time close 2017-12-08 13:25:05 +08:00
zhouyuyan
bdd802ebfb modify log 2017-12-07 16:30:48 +08:00
zhouyuyan
5786e69b01 modify cluster logic 2017-11-21 14:05:06 +08:00
32 changed files with 2261 additions and 1079 deletions

1
.gitignore vendored
View File

@@ -1,3 +1,4 @@
hmq
log
log/*
*.test

View File

@@ -1,6 +1,5 @@
FROM alpine
COPY hmq /
COPY hmq.config /
COPY ssl /ssl
COPY conf /conf

201
LICENSE Normal file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -3,16 +3,42 @@ Free and High Performance MQTT Broker
## About
Golang MQTT Broker, Version 3.1.1, and Compatible
for [eclipse paho client](https://github.com/eclipse?utf8=%E2%9C%93&q=mqtt&type=&language=)
for [eclipse paho client](https://github.com/eclipse?utf8=%E2%9C%93&q=mqtt&type=&language=) and mosquitto-client
Download: [click here](https://github.com/fhmq/hmq/releases)
## RUNNING
```bash
$ git clone https://github.com/fhmq/hmq.git
$ cd hmq
$ go get github.com/fhmq/hmq
$ cd $GOPATH/github.com/fhmq/hmq
$ go run main.go
```
### broker.config
## Usage of hmq:
~~~
Usage: hmq [options]
Broker Options:
-w, --worker <number> Worker num to process message, perfer (client num)/10. (default 1024)
-p, --port <port> Use port for clients (default: 1883)
--host <host> Network host to listen on. (default "0.0.0.0")
-ws, --wsport <port> Use port for websocket monitoring
-wsp,--wspath <path> Use path for websocket monitoring
-c, --config <file> Configuration file
Logging Options:
-d, --debug <bool> Enable debugging output (default false)
-D Debug enabled
Cluster Options:
-r, --router <rurl> Router who maintenance cluster info
-cp, --clusterport <cluster-port> Cluster listen port for others
Common Options:
-h, --help Show this message
~~~
### hmq.config
~~~
{
"workerNum": 4096,
@@ -20,9 +46,9 @@ $ go run main.go
"host": "0.0.0.0",
"cluster": {
"host": "0.0.0.0",
"port": "1993",
"routers": ["10.10.0.11:1993","10.10.0.12:1993"]
"port": "1993"
},
"router": "127.0.0.1:9888",
"wsPort": "1888",
"wsPath": "/ws",
"wsTLS": true,
@@ -51,20 +77,21 @@ $ go run main.go
* Supports will messages
* Queue subscribe
* Websocket Support
* TLS/SSL Support
* Flexible ACL
### QUEUE SUBSCRIBE
~~~
| Prefix | Examples |
| ------------- |---------------------------------|
| $queue/ | mosquitto_sub -t $queue/topic |
~~~
### Cluster
```bash
1, start router for hmq (https://github.com/fhmq/router.git)
$ go get github.com/fhmq/router
$ cd $GOPATH/github.com/fhmq/router
$ go run main.go
2, config router in hmq.config ("router": "127.0.0.1:9888")
```
### ACL Configure
#### The ACL rules define:
@@ -118,6 +145,14 @@ Client -> | Rule1 | --nomatch--> | Rule2 | --nomatch--> | Rule3 | -->
allow | deny allow | deny allow | deny
~~~
### Online/Offline Notification
```bash
topic:
$SYS/broker/connection/clients/<clientID>
payload:
{"clientID":"client001","online":true/false,"timestamp":"2018-10-25T09:32:32Z"}
```
## Performance
* High throughput
@@ -129,4 +164,9 @@ Client -> | Rule1 | --nomatch--> | Rule2 | --nomatch--> | Rule3 | -->
## License
* Apache License Version 2.0
* Apache License Version 2.0
## Reference
* Surgermq.(https://github.com/surgemq/surgemq)

View File

@@ -1 +0,0 @@
theme: jekyll-theme-slate

View File

@@ -1,11 +1,12 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker
import (
"hmq/lib/acl"
"strings"
log "github.com/cihub/seelog"
"github.com/fhmq/hmq/lib/acl"
"github.com/fsnotify/fsnotify"
"go.uber.org/zap"
"strings"
)
const (
@@ -40,10 +41,10 @@ func (b *Broker) handleFsEvent(event fsnotify.Event) error {
case b.config.AclConf:
if event.Op&fsnotify.Write == fsnotify.Write ||
event.Op&fsnotify.Create == fsnotify.Create {
log.Info("text:handling acl config change event:", event)
log.Info("text:handling acl config change event:", zap.String("filename", event.Name))
aclconfig, err := acl.AclConfigLoad(event.Name)
if err != nil {
log.Error("aclconfig change failed, load acl conf error: ", err)
log.Error("aclconfig change failed, load acl conf error: ", zap.Error(err))
return err
}
b.AclConfig = aclconfig
@@ -56,14 +57,14 @@ func (b *Broker) StartAclWatcher() {
go func() {
wch, e := fsnotify.NewWatcher()
if e != nil {
log.Error("start monitor acl config file error,", e)
log.Error("start monitor acl config file error,", zap.Error(e))
return
}
defer wch.Close()
for _, i := range watchList {
if err := wch.Add(i); err != nil {
log.Error("start monitor acl config file error,", err)
log.Error("start monitor acl config file error,", zap.Error(err))
return
}
}
@@ -73,7 +74,7 @@ func (b *Broker) StartAclWatcher() {
case evt := <-wch.Events:
b.handleFsEvent(evt)
case err := <-wch.Errors:
log.Error("error:", err.Error())
log.Error("error:", zap.Error(err))
}
}
}()

View File

@@ -1,8 +1,10 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker
import (
"crypto/tls"
"hmq/lib/acl"
"fmt"
"net"
"net/http"
"runtime/debug"
@@ -11,39 +13,80 @@ import (
"time"
"github.com/eclipse/paho.mqtt.golang/packets"
"github.com/fhmq/hmq/lib/acl"
"github.com/fhmq/hmq/lib/sessions"
"github.com/fhmq/hmq/lib/topics"
"github.com/fhmq/hmq/pool"
"github.com/shirou/gopsutil/mem"
"go.uber.org/zap"
"golang.org/x/net/websocket"
log "github.com/cihub/seelog"
)
const (
MessagePoolNum = 1024
MessagePoolMessageNum = 1024
)
type Message struct {
client *client
packet packets.ControlPacket
}
type Broker struct {
id string
cid uint64
config *Config
tlsConfig *tls.Config
AclConfig *acl.ACLConfig
clients sync.Map
routes sync.Map
remotes sync.Map
sl *Sublist
rl *RetainList
queues map[string]int
id string
cid uint64
mu sync.Mutex
config *Config
tlsConfig *tls.Config
AclConfig *acl.ACLConfig
wpool *pool.WorkerPool
clients sync.Map
routes sync.Map
remotes sync.Map
nodes map[string]interface{}
clusterPool chan *Message
queues map[string]int
topicsMgr *topics.Manager
sessionMgr *sessions.Manager
// messagePool []chan *Message
}
func newMessagePool() []chan *Message {
pool := make([]chan *Message, 0)
for i := 0; i < MessagePoolNum; i++ {
ch := make(chan *Message, MessagePoolMessageNum)
pool = append(pool, ch)
}
return pool
}
func NewBroker(config *Config) (*Broker, error) {
b := &Broker{
id: GenUniqueId(),
config: config,
sl: NewSublist(),
rl: NewRetainList(),
queues: make(map[string]int),
id: GenUniqueId(),
config: config,
wpool: pool.New(config.Worker),
nodes: make(map[string]interface{}),
queues: make(map[string]int),
clusterPool: make(chan *Message),
}
var err error
b.topicsMgr, err = topics.NewManager("mem")
if err != nil {
log.Error("new topic manager error", zap.Error(err))
return nil, err
}
b.sessionMgr, err = sessions.NewManager("mem")
if err != nil {
log.Error("new session manager error", zap.Error(err))
return nil, err
}
if b.config.TlsPort != "" {
tlsconfig, err := NewTLSConfig(b.config.TlsInfo)
if err != nil {
log.Error("new tlsConfig error: ", err)
log.Error("new tlsConfig error", zap.Error(err))
return nil, err
}
b.tlsConfig = tlsconfig
@@ -51,7 +94,7 @@ func NewBroker(config *Config) (*Broker, error) {
if b.config.Acl {
aclconfig, err := acl.AclConfigLoad(b.config.AclConf)
if err != nil {
log.Error("Load acl conf error: ", err)
log.Error("Load acl conf error", zap.Error(err))
return nil, err
}
b.AclConfig = aclconfig
@@ -60,26 +103,54 @@ func NewBroker(config *Config) (*Broker, error) {
return b, nil
}
func (b *Broker) SubmitWork(msg *Message) {
if b.wpool == nil {
b.wpool = pool.New(b.config.Worker)
}
if msg.client.typ == CLUSTER {
b.clusterPool <- msg
} else {
b.wpool.Submit(func() {
ProcessMessage(msg)
})
}
}
func (b *Broker) Start() {
if b == nil {
log.Error("broker is null")
return
}
//listen clinet over tcp
if b.config.Port != "" {
go b.StartClientListening(false)
}
//listen for cluster
if b.config.Cluster.Port != "" {
go b.StartClusterListening()
}
//listen for websocket
if b.config.WsPort != "" {
go b.StartWebsocketListening()
}
//listen client over tls
if b.config.TlsPort != "" {
go b.StartClientListening(true)
}
if len(b.config.Cluster.Routes) > 0 {
b.ConnectToRouters()
//connect on other node in cluster
if b.config.Router != "" {
go b.processClusterInfo()
b.ConnectToDiscovery()
}
//system monitor
go StateMonitor()
}
@@ -93,7 +164,6 @@ func StateMonitor() {
if v.UsedPercent > 75 {
debug.FreeOSMemory()
}
// fmt.Printf("Total: %v, Free:%v, UsedPercent:%f%%\n", v.Total, v.Free, v.UsedPercent)
}
}
}
@@ -101,7 +171,7 @@ func StateMonitor() {
func (b *Broker) StartWebsocketListening() {
path := b.config.WsPath
hp := ":" + b.config.WsPort
log.Info("Start Webscoker Listening on ", hp, path)
log.Info("Start Websocket Listener on:", zap.String("hp", hp), zap.String("path", path))
http.Handle(path, websocket.Handler(b.wsHandler))
var err error
if b.config.WsTLS {
@@ -110,7 +180,7 @@ func (b *Broker) StartWebsocketListening() {
err = http.ListenAndServe(hp, nil)
}
if err != nil {
log.Error("ListenAndServe: " + err.Error())
log.Error("ListenAndServe:" + err.Error())
return
}
}
@@ -119,7 +189,7 @@ func (b *Broker) wsHandler(ws *websocket.Conn) {
// io.Copy(ws, ws)
atomic.AddUint64(&b.cid, 1)
ws.PayloadType = websocket.BinaryFrame
b.handleConnection(CLIENT, ws, b.cid)
b.handleConnection(CLIENT, ws)
}
func (b *Broker) StartClientListening(Tls bool) {
@@ -129,14 +199,14 @@ func (b *Broker) StartClientListening(Tls bool) {
if Tls {
hp = b.config.TlsHost + ":" + b.config.TlsPort
l, err = tls.Listen("tcp", hp, b.tlsConfig)
log.Info("Start TLS Listening client on ", hp)
log.Info("Start TLS Listening client on ", zap.String("hp", hp))
} else {
hp := b.config.Host + ":" + b.config.Port
l, err = net.Listen("tcp", hp)
log.Info("Start Listening client on ", hp)
log.Info("Start Listening client on ", zap.String("hp", hp))
}
if err != nil {
log.Error("Error listening on ", err)
log.Error("Error listening on ", zap.Error(err))
return
}
tmpDelay := 10 * ACCEPT_MIN_SLEEP
@@ -145,20 +215,20 @@ func (b *Broker) StartClientListening(Tls bool) {
if err != nil {
if ne, ok := err.(net.Error); ok && ne.Temporary() {
log.Error("Temporary Client Accept Error(%v), sleeping %dms",
ne, tmpDelay/time.Millisecond)
zap.Error(ne), zap.Duration("sleeping", tmpDelay/time.Millisecond))
time.Sleep(tmpDelay)
tmpDelay *= 2
if tmpDelay > ACCEPT_MAX_SLEEP {
tmpDelay = ACCEPT_MAX_SLEEP
}
} else {
log.Error("Accept error: %v", err)
log.Error("Accept error: %v", zap.Error(err))
}
continue
}
tmpDelay = ACCEPT_MIN_SLEEP
atomic.AddUint64(&b.cid, 1)
go b.handleConnection(CLIENT, conn, b.cid)
go b.handleConnection(CLIENT, conn)
}
}
@@ -171,7 +241,7 @@ func (b *Broker) Handshake(conn net.Conn) bool {
// Force handshake
if err := nc.Handshake(); err != nil {
log.Error("TLS handshake error, ", err)
log.Error("TLS handshake error, ", zap.Error(err))
return false
}
nc.SetReadDeadline(time.Time{})
@@ -194,49 +264,42 @@ func TlsTimeout(conn *tls.Conn) {
func (b *Broker) StartClusterListening() {
var hp string = b.config.Cluster.Host + ":" + b.config.Cluster.Port
log.Info("Start Listening cluster on ", hp)
log.Info("Start Listening cluster on ", zap.String("hp", hp))
l, e := net.Listen("tcp", hp)
if e != nil {
log.Error("Error listening on ", e)
log.Error("Error listening on ", zap.Error(e))
return
}
var idx uint64 = 0
tmpDelay := 10 * ACCEPT_MIN_SLEEP
for {
conn, err := l.Accept()
if err != nil {
if ne, ok := err.(net.Error); ok && ne.Temporary() {
log.Error("Temporary Client Accept Error(%v), sleeping %dms",
ne, tmpDelay/time.Millisecond)
zap.Error(ne), zap.Duration("sleeping", tmpDelay/time.Millisecond))
time.Sleep(tmpDelay)
tmpDelay *= 2
if tmpDelay > ACCEPT_MAX_SLEEP {
tmpDelay = ACCEPT_MAX_SLEEP
}
} else {
log.Error("Accept error: %v", err)
log.Error("Accept error: %v", zap.Error(err))
}
continue
}
tmpDelay = ACCEPT_MIN_SLEEP
go b.handleConnection(ROUTER, conn, idx)
if idx == 1 {
idx = 0
} else {
idx = idx + 1
}
go b.handleConnection(ROUTER, conn)
}
}
func (b *Broker) handleConnection(typ int, conn net.Conn, idx uint64) {
func (b *Broker) handleConnection(typ int, conn net.Conn) {
//process connect packet
packet, err := packets.ReadPacket(conn)
if err != nil {
log.Error("read connect packet error: ", err)
log.Error("read connect packet error: ", zap.Error(err))
return
}
if packet == nil {
@@ -253,7 +316,7 @@ func (b *Broker) handleConnection(typ int, conn net.Conn, idx uint64) {
connack.SessionPresent = msg.CleanSession
err = connack.Write(conn)
if err != nil {
log.Error("send connack error, ", err, " clientID = ", msg.ClientIdentifier)
log.Error("send connack error, ", zap.Error(err), zap.String("clientID", msg.ClientIdentifier))
return
}
@@ -281,86 +344,196 @@ func (b *Broker) handleConnection(typ int, conn net.Conn, idx uint64) {
conn: conn,
info: info,
}
c.init()
err = b.getSession(c, msg, connack)
if err != nil {
log.Error("get session error: ", zap.String("clientID", c.info.clientID))
return
}
cid := c.info.clientID
var msgPool *MessagePool
var exist bool
var old interface{}
switch typ {
case CLIENT:
msgPool = MSGPool[idx%MessagePoolNum].GetPool()
c.mp = msgPool
old, exist = b.clients.Load(cid)
if exist {
log.Warn("client exist, close old...", " clientID = ", c.info.clientID)
log.Warn("client exist, close old...", zap.String("clientID", c.info.clientID))
ol, ok := old.(*client)
if ok {
msg := &Message{client: c, packet: DisconnectdPacket}
ol.mp.queue <- msg
ol.Close()
}
}
b.clients.Store(cid, c)
b.OnlineOfflineNotification(cid, true)
case ROUTER:
msgPool = MSGPool[(MessagePoolNum + idx)].GetPool()
c.mp = msgPool
old, exist = b.routes.Load(cid)
if exist {
log.Warn("router exist, close old...")
ol, ok := old.(*client)
if ok {
msg := &Message{client: c, packet: DisconnectdPacket}
ol.mp.queue <- msg
ol.Close()
}
}
b.routes.Store(cid, c)
}
// mpool := b.messagePool[fnv1a.HashString64(cid)%MessagePoolNum]
c.readLoop()
}
func (b *Broker) ConnectToRouters() {
for i := 0; i < len(b.config.Cluster.Routes); i++ {
url := b.config.Cluster.Routes[i]
go b.connectRouter(url, "")
}
}
func (b *Broker) connectRouter(url, remoteID string) {
func (b *Broker) ConnectToDiscovery() {
var conn net.Conn
var err error
var tempDelay time.Duration = 0
for {
conn, err = net.Dial("tcp", url)
conn, err = net.Dial("tcp", b.config.Router)
if err != nil {
log.Error("Error trying to connect to route: ", err)
select {
case <-time.After(DEFAULT_ROUTE_CONNECT):
log.Debug("Connect to route timeout ,retry...")
continue
log.Error("Error trying to connect to route: ", zap.Error(err))
log.Debug("Connect to route timeout ,retry...")
if 0 == tempDelay {
tempDelay = 1 * time.Second
} else {
tempDelay *= 2
}
if max := 20 * time.Second; tempDelay > max {
tempDelay = max
}
time.Sleep(tempDelay)
continue
}
break
}
route := &route{
remoteID: remoteID,
remoteUrl: url,
log.Debug("connect to router success :", zap.String("Router", b.config.Router))
cid := b.id
info := info{
clientID: cid,
keepalive: 60,
}
c := &client{
typ: CLUSTER,
broker: b,
conn: conn,
info: info,
}
c.init()
c.SendConnect()
c.SendInfo()
go c.readLoop()
go c.StartPing()
}
func (b *Broker) processClusterInfo() {
for {
msg, ok := <-b.clusterPool
if !ok {
log.Error("read message from cluster channel error")
return
}
ProcessMessage(msg)
}
}
func (b *Broker) connectRouter(id, addr string) {
var conn net.Conn
var err error
var timeDelay time.Duration = 0
retryTimes := 0
max := 32 * time.Second
for {
if !b.checkNodeExist(id, addr) {
return
}
conn, err = net.Dial("tcp", addr)
if err != nil {
log.Error("Error trying to connect to route: ", zap.Error(err))
if retryTimes > 50 {
return
}
log.Debug("Connect to route timeout ,retry...")
if 0 == timeDelay {
timeDelay = 1 * time.Second
} else {
timeDelay *= 2
}
if timeDelay > max {
timeDelay = max
}
time.Sleep(timeDelay)
retryTimes++
continue
}
break
}
route := route{
remoteID: id,
remoteUrl: addr,
}
cid := GenUniqueId()
info := info{
clientID: cid,
clientID: cid,
keepalive: 60,
}
c := &client{
typ: REMOTE,
conn: conn,
route: route,
info: info,
broker: b,
typ: REMOTE,
conn: conn,
route: route,
info: info,
}
c.init()
b.remotes.Store(cid, c)
c.SendConnect()
c.SendInfo()
c.StartPing()
// mpool := b.messagePool[fnv1a.HashString64(cid)%MessagePoolNum]
go c.readLoop()
go c.StartPing()
}
func (b *Broker) checkNodeExist(id, url string) bool {
if id == b.id {
return false
}
for k, v := range b.nodes {
if k == id {
return true
}
//skip
l, ok := v.(string)
if ok {
if url == l {
return true
}
}
}
return false
}
func (b *Broker) CheckRemoteExist(remoteID, url string) bool {
@@ -369,9 +542,7 @@ func (b *Broker) CheckRemoteExist(remoteID, url string) bool {
v, ok := value.(*client)
if ok {
if v.route.remoteUrl == url {
// if v.route.remoteID == "" || v.route.remoteID != remoteID {
v.route.remoteID = remoteID
// }
exist = true
return false
}
@@ -386,22 +557,24 @@ func (b *Broker) SendLocalSubsToRouter(c *client) {
b.clients.Range(func(key, value interface{}) bool {
client, ok := value.(*client)
if ok {
subs := client.subs
subs := client.subMap
for _, sub := range subs {
subInfo.Topics = append(subInfo.Topics, string(sub.topic))
subInfo.Topics = append(subInfo.Topics, sub.topic)
subInfo.Qoss = append(subInfo.Qoss, sub.qos)
}
}
return true
})
err := c.WriterPacket(subInfo)
if err != nil {
log.Error("Send localsubs To Router error :", err)
if len(subInfo.Topics) > 0 {
err := c.WriterPacket(subInfo)
if err != nil {
log.Error("Send localsubs To Router error :", zap.Error(err))
}
}
}
func (b *Broker) BroadcastInfoMessage(remoteID string, msg *packets.PublishPacket) {
b.remotes.Range(func(key, value interface{}) bool {
b.routes.Range(func(key, value interface{}) bool {
r, ok := value.(*client)
if ok {
if r.route.remoteID == remoteID {
@@ -416,7 +589,8 @@ func (b *Broker) BroadcastInfoMessage(remoteID string, msg *packets.PublishPacke
}
func (b *Broker) BroadcastSubOrUnsubMessage(packet packets.ControlPacket) {
b.remotes.Range(func(key, value interface{}) bool {
b.routes.Range(func(key, value interface{}) bool {
r, ok := value.(*client)
if ok {
r.WriterPacket(packet)
@@ -441,18 +615,22 @@ func (b *Broker) removeClient(c *client) {
}
func (b *Broker) PublishMessage(packet *packets.PublishPacket) {
topic := packet.TopicName
r := b.sl.Match(topic)
// log.Info("psubs num: ", len(r.psubs))
if len(r.psubs) == 0 {
var subs []interface{}
var qoss []byte
b.mu.Lock()
err := b.topicsMgr.Subscribers([]byte(packet.TopicName), packet.Qos, &subs, &qoss)
b.mu.Unlock()
if err != nil {
log.Error("search sub client error, ", zap.Error(err))
return
}
for _, sub := range r.psubs {
if sub != nil {
err := sub.client.WriterPacket(packet)
for _, sub := range subs {
s, ok := sub.(*subscription)
if ok {
err := s.client.WriterPacket(packet)
if err != nil {
log.Error("process message for psub error, ", err)
log.Error("write message error, ", zap.Error(err))
}
}
}
@@ -460,14 +638,21 @@ func (b *Broker) PublishMessage(packet *packets.PublishPacket) {
func (b *Broker) BroadcastUnSubscribe(subs map[string]*subscription) {
ubsub := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket)
unsub := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket)
for topic, _ := range subs {
// topic := sub.topic
// if sub.queue {
// topic = "$queue/" + sub.topic
// }
ubsub.Topics = append(ubsub.Topics, topic)
unsub.Topics = append(unsub.Topics, topic)
}
b.BroadcastSubOrUnsubMessage(ubsub)
if len(unsub.Topics) > 0 {
b.BroadcastSubOrUnsubMessage(unsub)
}
}
func (b *Broker) OnlineOfflineNotification(clientID string, online bool) {
packet := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
packet.TopicName = "$SYS/broker/connection/clients/" + clientID
packet.Qos = 0
packet.Payload = []byte(fmt.Sprintf(`{"clientID":"%s","online":%v,"timestamp":"%s"}`, clientID, online, time.Now().UTC().Format(time.RFC3339)))
b.PublishMessage(packet)
}

View File

@@ -1,25 +1,32 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker
import (
"context"
"errors"
"net"
"reflect"
"strings"
"sync"
"time"
"github.com/eclipse/paho.mqtt.golang/packets"
log "github.com/cihub/seelog"
"github.com/fhmq/hmq/lib/sessions"
"github.com/fhmq/hmq/lib/topics"
"go.uber.org/zap"
)
const (
// special pub topic for cluster info BrokerInfoTopic
BrokerInfoTopic = "broker001info/brokerinfo"
BrokerInfoTopic = "broker000100101info"
// CLIENT is an end user.
CLIENT = 0
// ROUTER is another router in the cluster.
ROUTER = 1
//REMOTE is the router connect to other cluster
REMOTE = 2
REMOTE = 2
CLUSTER = 3
)
const (
Connected = 1
@@ -27,22 +34,21 @@ const (
)
type client struct {
typ int
mu sync.Mutex
broker *Broker
conn net.Conn
info info
route *route
status int
smu sync.RWMutex
mp *MessagePool
subs map[string]*subscription
rsubs map[string]*subInfo
}
type subInfo struct {
sub *subscription
num int
typ int
mu sync.Mutex
broker *Broker
conn net.Conn
info info
route route
status int
ctx context.Context
cancelFunc context.CancelFunc
session *sessions.Session
subMap map[string]*subscription
topicsMgr *topics.Manager
subs []interface{}
qoss []byte
rmsgs []*packets.PublishPacket
}
type subscription struct {
@@ -72,50 +78,59 @@ var (
)
func (c *client) init() {
c.smu.Lock()
defer c.smu.Unlock()
c.status = Connected
typ := c.typ
if typ == ROUTER {
c.rsubs = make(map[string]*subInfo)
} else if typ == CLIENT {
c.subs = make(map[string]*subscription, 10)
}
c.info.localIP = strings.Split(c.conn.LocalAddr().String(), ":")[0]
c.info.remoteIP = strings.Split(c.conn.RemoteAddr().String(), ":")[0]
c.ctx, c.cancelFunc = context.WithCancel(context.Background())
c.subMap = make(map[string]*subscription)
c.topicsMgr = c.broker.topicsMgr
}
func (c *client) readLoop() {
nc := c.conn
msgPool := c.mp
if nc == nil || msgPool == nil {
b := c.broker
if nc == nil || b == nil {
return
}
lastIn := uint16(time.Now().Unix())
var nowTime uint16
keepAlive := time.Second * time.Duration(c.info.keepalive)
timeOut := keepAlive + (keepAlive / 2)
for {
nowTime = uint16(time.Now().Unix())
if 0 != c.info.keepalive && nowTime-lastIn > c.info.keepalive*3/2 {
log.Errorf("Client %s has exceeded timeout, disconnecting.\n", c.info.clientID)
break
select {
case <-c.ctx.Done():
return
default:
//add read timeout
if err := nc.SetReadDeadline(time.Now().Add(timeOut)); err != nil {
log.Error("set read timeout error: ", zap.Error(err), zap.String("ClientID", c.info.clientID))
return
}
packet, err := packets.ReadPacket(nc)
if err != nil {
log.Error("read packet error: ", zap.Error(err), zap.String("ClientID", c.info.clientID))
msg := &Message{client: c, packet: DisconnectdPacket}
b.SubmitWork(msg)
// remove subscriptions related to that client
for topic, sub := range c.subMap {
t := []byte(topic)
c.topicsMgr.Unsubscribe(t, sub)
c.session.RemoveTopic(topic)
delete(c.subMap, topic)
}
return
}
msg := &Message{
client: c,
packet: packet,
}
b.SubmitWork(msg)
}
packet, err := packets.ReadPacket(nc)
if err != nil {
log.Error("read packet error: ", err, " clientID = ", c.info.clientID)
break
}
// log.Info("recv buf: ", packet)
lastIn = uint16(time.Now().Unix())
msg := &Message{
client: c,
packet: packet,
}
msgPool.queue <- msg
}
msg := &Message{client: c, packet: DisconnectdPacket}
msgPool.queue <- msg
msgPool.Reduce()
}
func ProcessMessage(msg *Message) {
@@ -124,10 +139,9 @@ func ProcessMessage(msg *Message) {
if ca == nil {
return
}
log.Debug("Recv message: ", ca.String(), " clientID = ", c.info.clientID)
log.Debug("Recv message:", zap.String("message type", reflect.TypeOf(msg.packet).String()[9:]), zap.String("ClientID", c.info.clientID))
switch ca.(type) {
case *packets.ConnackPacket:
case *packets.ConnectPacket:
case *packets.PublishPacket:
packet := ca.(*packets.PublishPacket)
@@ -150,7 +164,7 @@ func ProcessMessage(msg *Message) {
case *packets.DisconnectPacket:
c.Close()
default:
log.Info("Recv Unknow message.......", " clientID = ", c.info.clientID)
log.Info("Recv Unknow message.......", zap.String("ClientID", c.info.clientID))
}
}
@@ -160,8 +174,13 @@ func (c *client) ProcessPublish(packet *packets.PublishPacket) {
}
topic := packet.TopicName
if topic == BrokerInfoTopic && c.typ == CLUSTER {
c.ProcessInfo(packet)
return
}
if !c.CheckTopicAuth(PUB, topic) {
log.Error("Pub Topics Auth failed, ", topic, " clientID = ", c.info.clientID)
log.Error("Pub Topics Auth failed, ", zap.String("topic", topic), zap.String("ClientID", c.info.clientID))
return
}
@@ -172,24 +191,16 @@ func (c *client) ProcessPublish(packet *packets.PublishPacket) {
puback := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
puback.MessageID = packet.MessageID
if err := c.WriterPacket(puback); err != nil {
log.Error("send puback error, ", err, " clientID = ", c.info.clientID)
log.Error("send puback error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
return
}
c.ProcessPublishMessage(packet)
case QosExactlyOnce:
return
default:
log.Error("publish with unknown qos", " clientID = ", c.info.clientID)
log.Error("publish with unknown qos", zap.String("ClientID", c.info.clientID))
return
}
if packet.Retain {
if b := c.broker; b != nil {
err := b.rl.Insert(topic, packet)
if err != nil {
log.Error("Insert Retain Message error: ", err, " clientID = ", c.info.clientID)
}
}
}
}
@@ -203,81 +214,42 @@ func (c *client) ProcessPublishMessage(packet *packets.PublishPacket) {
return
}
typ := c.typ
topic := packet.TopicName
r := b.sl.Match(topic)
// log.Info("psubs num: ", len(r.psubs))
if len(r.qsubs) == 0 && len(r.psubs) == 0 {
if packet.Retain {
if err := c.topicsMgr.Retain(packet); err != nil {
log.Error("Error retaining message: ", zap.Error(err), zap.String("ClientID", c.info.clientID))
}
}
c.mu.Lock()
err := c.topicsMgr.Subscribers([]byte(packet.TopicName), packet.Qos, &c.subs, &c.qoss)
c.mu.Unlock()
if err != nil {
log.Error("Error retrieving subscribers list: ", zap.String("ClientID", c.info.clientID))
return
}
for _, sub := range r.psubs {
if sub.client.typ == ROUTER {
if typ == ROUTER {
continue
}
}
if sub != nil {
err := sub.client.WriterPacket(packet)
if err != nil {
log.Error("process message for psub error, ", err, " clientID = ", c.info.clientID)
}
}
// log.Info("psubs num: ", len(r.psubs))
if len(c.subs) == 0 {
return
}
pre := -1
now := -1
t := "$queue/" + topic
cnt, exist := b.queues[t]
if exist {
// log.Info("queue index : ", cnt)
for _, sub := range r.qsubs {
if sub.client.typ == ROUTER {
if c.typ == ROUTER {
for _, sub := range c.subs {
s, ok := sub.(*subscription)
if ok {
if s.client.typ == ROUTER {
if typ != CLIENT {
continue
}
}
if c.typ == CLIENT {
now = now + 1
} else {
now = now + sub.client.rsubs[t].num
err := s.client.WriterPacket(packet)
if err != nil {
log.Error("process message for psub error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
}
if cnt > pre && cnt <= now {
if sub != nil {
err := sub.client.WriterPacket(packet)
if err != nil {
log.Error("send publish error, ", err, " clientID = ", c.info.clientID)
}
}
break
}
pre = now
}
}
length := getQueueSubscribeNum(r.qsubs)
if length > 0 {
b.queues[t] = (b.queues[t] + 1) % length
}
}
func getQueueSubscribeNum(qsubs []*subscription) int {
topic := "$queue/"
if len(qsubs) < 1 {
return 0
} else {
topic = topic + qsubs[0].topic
}
num := 0
for _, sub := range qsubs {
if sub.client.typ == CLIENT {
num = num + 1
} else {
num = num + sub.client.rsubs[topic].num
}
}
return num
}
func (c *client) ProcessSubscribe(packet *packets.SubscribePacket) {
@@ -300,64 +272,34 @@ func (c *client) ProcessSubscribe(packet *packets.SubscribePacket) {
t := topic
//check topic auth for client
if !c.CheckTopicAuth(SUB, topic) {
log.Error("Sub topic Auth failed: ", topic, " clientID = ", c.info.clientID)
log.Error("Sub topic Auth failed: ", zap.String("topic", topic), zap.String("ClientID", c.info.clientID))
retcodes = append(retcodes, QosFailure)
continue
}
queue := strings.HasPrefix(topic, "$queue/")
if queue {
if len(t) > 7 {
t = t[7:]
if _, exists := b.queues[topic]; !exists {
b.queues[topic] = 0
}
} else {
retcodes = append(retcodes, QosFailure)
continue
}
}
sub := &subscription{
topic: t,
qos: qoss[i],
client: c,
queue: queue,
}
switch c.typ {
case CLIENT:
if _, exist := c.subs[topic]; !exist {
c.subs[topic] = sub
} else {
//if exist ,check whether qos change
c.subs[topic].qos = qoss[i]
retcodes = append(retcodes, qoss[i])
continue
}
case ROUTER:
if subinfo, exist := c.rsubs[topic]; !exist {
sinfo := &subInfo{sub: sub, num: 1}
c.rsubs[topic] = sinfo
} else {
subinfo.num = subinfo.num + 1
retcodes = append(retcodes, qoss[i])
continue
}
}
err := b.sl.Insert(sub)
rqos, err := c.topicsMgr.Subscribe([]byte(topic), qoss[i], sub)
if err != nil {
log.Error("Insert subscription error: ", err, " clientID = ", c.info.clientID)
retcodes = append(retcodes, QosFailure)
} else {
retcodes = append(retcodes, qoss[i])
return
}
c.subMap[topic] = sub
c.session.AddTopic(topic, qoss[i])
retcodes = append(retcodes, rqos)
c.topicsMgr.Retained([]byte(topic), &c.rmsgs)
}
suback.ReturnCodes = retcodes
err := c.WriterPacket(suback)
if err != nil {
log.Error("send suback error, ", err, " clientID = ", c.info.clientID)
log.Error("send suback error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
return
}
//broadcast subscribe message
@@ -366,13 +308,11 @@ func (c *client) ProcessSubscribe(packet *packets.SubscribePacket) {
}
//process retain message
for _, t := range topics {
packets := b.rl.Match(t)
for _, packet := range packets {
log.Info("process retain message: ", packet, " clientID = ", c.info.clientID)
if packet != nil {
c.WriterPacket(packet)
}
for _, rm := range c.rmsgs {
if err := c.WriterPacket(rm); err != nil {
log.Error("Error publishing retained message:", zap.Any("err", err), zap.String("ClientID", c.info.clientID))
} else {
log.Info("process retain message: ", zap.Any("packet", packet), zap.String("ClientID", c.info.clientID))
}
}
}
@@ -385,34 +325,16 @@ func (c *client) ProcessUnSubscribe(packet *packets.UnsubscribePacket) {
if b == nil {
return
}
typ := c.typ
topics := packet.Topics
for _, t := range topics {
var sub *subscription
ok := false
switch typ {
case CLIENT:
sub, ok = c.subs[t]
case ROUTER:
subinfo, ok := c.rsubs[t]
if ok {
subinfo.num = subinfo.num - 1
if subinfo.num < 1 {
sub = subinfo.sub
delete(c.rsubs, t)
} else {
c.rsubs[t] = subinfo
sub = nil
}
} else {
return
}
for _, topic := range topics {
t := []byte(topic)
sub, exist := c.subMap[topic]
if exist {
c.topicsMgr.Unsubscribe(t, sub)
c.session.RemoveTopic(topic)
delete(c.subMap, topic)
}
if ok {
go c.unsubscribe(sub)
}
}
unsuback := packets.NewControlPacket(packets.Unsuback).(*packets.UnsubackPacket)
@@ -420,7 +342,7 @@ func (c *client) ProcessUnSubscribe(packet *packets.UnsubscribePacket) {
err := c.WriterPacket(unsuback)
if err != nil {
log.Error("send unsuback error, ", err, " clientID = ", c.info.clientID)
log.Error("send unsuback error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
return
}
// //process ubsubscribe message
@@ -429,19 +351,6 @@ func (c *client) ProcessUnSubscribe(packet *packets.UnsubscribePacket) {
}
}
func (c *client) unsubscribe(sub *subscription) {
if c.typ == CLIENT {
delete(c.subs, sub.topic)
}
b := c.broker
if b != nil && sub != nil {
b.sl.Remove(sub)
}
}
func (c *client) ProcessPing() {
if c.status == Disconnected {
return
@@ -449,7 +358,7 @@ func (c *client) ProcessPing() {
resp := packets.NewControlPacket(packets.Pingresp).(*packets.PingrespPacket)
err := c.WriterPacket(resp)
if err != nil {
log.Error("send PingResponse error, ", err, " clientID = ", c.info.clientID)
log.Error("send PingResponse error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
return
}
}
@@ -458,12 +367,13 @@ func (c *client) Close() {
if c.status == Disconnected {
return
}
//wait for message complete
time.Sleep(time.Second)
c.smu.Lock()
c.cancelFunc()
c.status = Disconnected
c.smu.Unlock()
//wait for message complete
time.Sleep(1 * time.Second)
// c.status = Disconnected
if c.conn != nil {
c.conn.Close()
@@ -471,25 +381,44 @@ func (c *client) Close() {
}
b := c.broker
subs := c.subs
subs := c.subMap
if b != nil {
b.removeClient(c)
for _, sub := range subs {
err := b.sl.Remove(sub)
if err != nil {
log.Error("closed client but remove sublist error, ", err, " clientID = ", c.info.clientID)
}
}
if c.typ == CLIENT {
b.BroadcastUnSubscribe(subs)
//offline notification
b.OnlineOfflineNotification(c.info.clientID, false)
}
if c.info.willMsg != nil {
b.PublishMessage(c.info.willMsg)
}
if c.typ == CLUSTER {
b.ConnectToDiscovery()
}
//do reconnect
if c.typ == REMOTE {
go b.connectRouter(c.route.remoteID, c.route.remoteUrl)
}
}
}
func (c *client) WriterPacket(packet packets.ControlPacket) error {
if c.status == Disconnected {
return nil
}
if packet == nil {
return nil
}
if c.conn == nil {
c.Close()
return errors.New("connect lost ....")
}
c.mu.Lock()
err := packet.Write(c.conn)
c.mu.Unlock()

View File

@@ -1,3 +1,5 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker
import (
@@ -5,10 +7,8 @@ import (
"crypto/rand"
"encoding/base64"
"encoding/hex"
"errors"
"io"
"reflect"
"strings"
"time"
)
@@ -46,47 +46,6 @@ const (
QosFailure = 0x80
)
func SubscribeTopicCheckAndSpilt(topic string) ([]string, error) {
if strings.Index(topic, "#") != -1 && strings.Index(topic, "#") != len(topic)-1 {
return nil, errors.New("Topic format error with index of #")
}
re := strings.Split(topic, "/")
for i, v := range re {
if i != 0 && i != (len(re)-1) {
if v == "" {
return nil, errors.New("Topic format error with index of //")
}
if strings.Contains(v, "+") && v != "+" {
return nil, errors.New("Topic format error with index of +")
}
} else {
if v == "" {
re[i] = "/"
}
}
}
return re, nil
}
func PublishTopicCheckAndSpilt(topic string) ([]string, error) {
if strings.Index(topic, "#") != -1 || strings.Index(topic, "+") != -1 {
return nil, errors.New("Publish Topic format error with + and #")
}
re := strings.Split(topic, "/")
for i, v := range re {
if v == "" {
if i != 0 && i != (len(re)-1) {
return nil, errors.New("Topic format error with index of //")
} else {
re[i] = "/"
}
}
}
return re, nil
}
func equal(k1, k2 interface{}) bool {
if reflect.TypeOf(k1) != reflect.TypeOf(k2) {
return false

View File

@@ -1,17 +1,19 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"os"
log "github.com/cihub/seelog"
)
const (
CONFIGFILE = "hmq.config"
"github.com/fhmq/hmq/logger"
"go.uber.org/zap"
)
type Config struct {
@@ -19,6 +21,7 @@ type Config struct {
Host string `json:"host"`
Port string `json:"port"`
Cluster RouteInfo `json:"cluster"`
Router string `json:"router"`
TlsHost string `json:"tlsHost"`
TlsPort string `json:"tlsPort"`
WsPath string `json:"wsPath"`
@@ -27,12 +30,12 @@ type Config struct {
TlsInfo TLSInfo `json:"tlsInfo"`
Acl bool `json:"acl"`
AclConf string `json:"aclConf"`
Debug bool `json:"-"`
}
type RouteInfo struct {
Host string `json:"host"`
Port string `json:"port"`
Routes []string `json:"routes"`
Host string `json:"host"`
Port string `json:"port"`
}
type TLSInfo struct {
@@ -42,11 +45,94 @@ type TLSInfo struct {
KeyFile string `json:"keyFile"`
}
func LoadConfig() (*Config, error) {
var DefaultConfig *Config = &Config{
Worker: 4096,
Host: "0.0.0.0",
Port: "1883",
Acl: false,
}
content, err := ioutil.ReadFile(CONFIGFILE)
var (
log *zap.Logger
)
func showHelp() {
fmt.Printf("%s\n", usageStr)
os.Exit(0)
}
func ConfigureConfig(args []string) (*Config, error) {
config := &Config{}
var (
help bool
configFile string
)
fs := flag.NewFlagSet("hmq-broker", flag.ExitOnError)
fs.Usage = showHelp
fs.BoolVar(&help, "h", false, "Show this message.")
fs.BoolVar(&help, "help", false, "Show this message.")
fs.IntVar(&config.Worker, "w", 1024, "worker num to process message, perfer (client num)/10.")
fs.IntVar(&config.Worker, "worker", 1024, "worker num to process message, perfer (client num)/10.")
fs.StringVar(&config.Port, "port", "1883", "Port to listen on.")
fs.StringVar(&config.Port, "p", "1883", "Port to listen on.")
fs.StringVar(&config.Host, "host", "0.0.0.0", "Network host to listen on")
fs.StringVar(&config.Cluster.Port, "cp", "", "Cluster port from which members can connect.")
fs.StringVar(&config.Cluster.Port, "clusterport", "", "Cluster port from which members can connect.")
fs.StringVar(&config.Router, "r", "", "Router who maintenance cluster info")
fs.StringVar(&config.Router, "router", "", "Router who maintenance cluster info")
fs.StringVar(&config.WsPort, "ws", "", "port for ws to listen on")
fs.StringVar(&config.WsPort, "wsport", "", "port for ws to listen on")
fs.StringVar(&config.WsPath, "wsp", "", "path for ws to listen on")
fs.StringVar(&config.WsPath, "wspath", "", "path for ws to listen on")
fs.StringVar(&configFile, "config", "", "config file for hmq")
fs.StringVar(&configFile, "c", "", "config file for hmq")
fs.BoolVar(&config.Debug, "debug", false, "enable Debug logging.")
fs.BoolVar(&config.Debug, "d", false, "enable Debug logging.")
fs.Bool("D", true, "enable Debug logging.")
if err := fs.Parse(args); err != nil {
return nil, err
}
if help {
showHelp()
return nil, nil
}
fs.Visit(func(f *flag.Flag) {
switch f.Name {
case "D":
config.Debug = true
}
})
logger.InitLogger(config.Debug)
log = logger.Get().Named("Broker")
if configFile != "" {
tmpConfig, e := LoadConfig(configFile)
if e != nil {
return nil, e
} else {
config = tmpConfig
}
}
if err := config.check(); err != nil {
return nil, err
}
return config, nil
}
func LoadConfig(filename string) (*Config, error) {
content, err := ioutil.ReadFile(filename)
if err != nil {
log.Error("Read config file error: ", err)
log.Error("Read config file error: ", zap.Error(err))
return nil, err
}
// log.Info(string(content))
@@ -54,16 +140,19 @@ func LoadConfig() (*Config, error) {
var config Config
err = json.Unmarshal(content, &config)
if err != nil {
log.Error("Unmarshal config file error: ", err)
log.Error("Unmarshal config file error: ", zap.Error(err))
return nil, err
}
return &config, nil
}
func (config *Config) check() error {
if config.Worker == 0 {
config.Worker = 1024
}
WorkNum = config.Worker
if config.Port != "" {
if config.Host == "" {
config.Host = "0.0.0.0"
@@ -75,29 +164,33 @@ func LoadConfig() (*Config, error) {
config.Cluster.Host = "0.0.0.0"
}
}
if config.Router != "" {
if config.Cluster.Port == "" {
return errors.New("cluster port is null")
}
}
if config.TlsPort != "" {
if config.TlsInfo.CertFile == "" || config.TlsInfo.KeyFile == "" {
log.Error("tls config error, no cert or key file.")
return nil, err
return errors.New("tls config error, no cert or key file.")
}
if config.TlsHost == "" {
config.TlsHost = "0.0.0.0"
}
}
return &config, nil
return nil
}
func NewTLSConfig(tlsInfo TLSInfo) (*tls.Config, error) {
cert, err := tls.LoadX509KeyPair(tlsInfo.CertFile, tlsInfo.KeyFile)
if err != nil {
return nil, fmt.Errorf("error parsing X509 certificate/key pair: %v", err)
return nil, fmt.Errorf("error parsing X509 certificate/key pair: %v", zap.Error(err))
}
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
if err != nil {
return nil, fmt.Errorf("error parsing certificate: %v", err)
return nil, fmt.Errorf("error parsing certificate: %v", zap.Error(err))
}
// Create TLSConfig

View File

@@ -1,48 +0,0 @@
package broker
// const (
// WorkNum = 4096
// )
var WorkNum int
type Dispatcher struct {
WorkerPool chan chan *Message
}
func StartDispatcher() {
InitMessagePool()
dispatcher := NewDispatcher()
dispatcher.Run()
}
func (d *Dispatcher) Run() {
// starting n number of workers
for i := 0; i < WorkNum; i++ {
worker := NewWorker(d.WorkerPool)
worker.Start()
}
go d.dispatch()
}
func NewDispatcher() *Dispatcher {
pool := make(chan chan *Message, WorkNum)
return &Dispatcher{WorkerPool: pool}
}
func (d *Dispatcher) dispatch() {
for i := 0; i < MessagePoolNum; i++ {
go func(idx int) {
for {
select {
case msg := <-MSGPool[idx].queue:
go func(msg *Message) {
msgChannel := <-d.WorkerPool
msgChannel <- msg
}(msg)
}
}
}(i)
}
}

View File

@@ -1,13 +1,14 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker
import (
"fmt"
"time"
"github.com/eclipse/paho.mqtt.golang/packets"
simplejson "github.com/bitly/go-simplejson"
log "github.com/cihub/seelog"
"github.com/eclipse/paho.mqtt.golang/packets"
"go.uber.org/zap"
)
func (c *client) SendInfo() {
@@ -19,28 +20,31 @@ func (c *client) SendInfo() {
infoMsg := NewInfo(c.broker.id, url, false)
err := c.WriterPacket(infoMsg)
if err != nil {
log.Error("send info message error, ", err)
log.Error("send info message error, ", zap.Error(err))
return
}
// log.Info("send info success")
}
func (c *client) StartPing() {
timeTicker := time.NewTicker(time.Second * 30)
timeTicker := time.NewTicker(time.Second * 50)
ping := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket)
for {
select {
case <-timeTicker.C:
err := c.WriterPacket(ping)
if err != nil {
log.Error("ping error: ", err)
log.Error("ping error: ", zap.Error(err))
c.Close()
}
case <-c.ctx.Done():
return
}
}
}
func (c *client) SendConnect() {
if c.status == Disconnected {
if c.status != Connected {
return
}
m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
@@ -50,10 +54,10 @@ func (c *client) SendConnect() {
m.Keepalive = uint16(60)
err := c.WriterPacket(m)
if err != nil {
log.Error("send connect message error, ", err)
log.Error("send connect message error, ", zap.Error(err))
return
}
// log.Info("send connet success")
log.Info("send connect success")
}
func NewInfo(sid, url string, isforword bool) *packets.PublishPacket {
@@ -61,7 +65,7 @@ func NewInfo(sid, url string, isforword bool) *packets.PublishPacket {
pub.Qos = 0
pub.TopicName = BrokerInfoTopic
pub.Retain = false
info := fmt.Sprintf(`{"remoteID":"%s","url":"%s","isForward":%t}`, sid, url, isforword)
info := fmt.Sprintf(`{"brokerID":"%s","brokerUrl":"%s"}`, sid, url)
// log.Info("new info", string(info))
pub.Payload = []byte(info)
return pub
@@ -74,47 +78,36 @@ func (c *client) ProcessInfo(packet *packets.PublishPacket) {
return
}
log.Info("recv remoteInfo: ", string(packet.Payload))
log.Info("recv remoteInfo: ", zap.String("payload", string(packet.Payload)))
js, e := simplejson.NewJson(packet.Payload)
if e != nil {
log.Warn("parse info message err", e)
js, err := simplejson.NewJson(packet.Payload)
if err != nil {
log.Warn("parse info message err", zap.Error(err))
return
}
rid := js.Get("remoteID").MustString()
rurl := js.Get("url").MustString()
isForward := js.Get("isForward").MustBool()
if rid == "" {
log.Error("receive info message error with remoteID is null")
routes, err := js.Get("data").Map()
if routes == nil {
log.Error("receive info message error, ", zap.Error(err))
return
}
if rid == b.id {
if !isForward {
c.Close() //close connet self
b.nodes = routes
b.mu.Lock()
for rid, rurl := range routes {
if rid == b.id {
continue
}
return
}
exist := b.CheckRemoteExist(rid, rurl)
if !exist {
go b.connectRouter(rurl, rid)
}
// log.Info("isforword: ", isForward)
if !isForward {
route := &route{
remoteUrl: rurl,
remoteID: rid,
url, ok := rurl.(string)
if ok {
exist := b.CheckRemoteExist(rid, url)
if !exist {
b.connectRouter(rid, url)
}
}
c.route = route
go b.SendLocalSubsToRouter(c)
// log.Info("BroadcastInfoMessage starting... ")
infoMsg := NewInfo(rid, rurl, true)
b.BroadcastInfoMessage(rid, infoMsg)
}
return
b.mu.Unlock()
}

View File

@@ -1,62 +0,0 @@
package broker
import (
"sync"
"github.com/eclipse/paho.mqtt.golang/packets"
)
const (
MaxUser = 1024 * 1024
MessagePoolNum = 1024
MessagePoolUser = MaxUser / MessagePoolNum
MessagePoolMessageNum = MaxUser / MessagePoolNum * 4
)
type Message struct {
client *client
packet packets.ControlPacket
}
var (
MSGPool []MessagePool
)
type MessagePool struct {
l sync.Mutex
maxuser int
user int
queue chan *Message
}
func InitMessagePool() {
MSGPool = make([]MessagePool, (MessagePoolNum + 2))
for i := 0; i < (MessagePoolNum + 2); i++ {
MSGPool[i].Init(MessagePoolUser, MessagePoolMessageNum)
}
}
func (p *MessagePool) Init(num int, maxusernum int) {
p.maxuser = maxusernum
p.queue = make(chan *Message, num)
}
func (p *MessagePool) GetPool() *MessagePool {
p.l.Lock()
if p.user+1 < p.maxuser {
p.user += 1
p.l.Unlock()
return p
} else {
p.l.Unlock()
return nil
}
}
func (p *MessagePool) Reduce() {
p.l.Lock()
p.user -= 1
p.l.Unlock()
}

View File

@@ -1,122 +0,0 @@
package broker
import (
"sync"
"github.com/eclipse/paho.mqtt.golang/packets"
)
type RetainList struct {
sync.RWMutex
root *rlevel
}
type rlevel struct {
nodes map[string]*rnode
}
type rnode struct {
next *rlevel
msg *packets.PublishPacket
}
type RetainResult struct {
msg []*packets.PublishPacket
}
func newRNode() *rnode {
return &rnode{}
}
func newRLevel() *rlevel {
return &rlevel{nodes: make(map[string]*rnode)}
}
func NewRetainList() *RetainList {
return &RetainList{root: newRLevel()}
}
func (r *RetainList) Insert(topic string, buf *packets.PublishPacket) error {
tokens, err := PublishTopicCheckAndSpilt(topic)
if err != nil {
return err
}
// log.Info("insert tokens:", tokens)
r.Lock()
l := r.root
var n *rnode
for _, t := range tokens {
n = l.nodes[t]
if n == nil {
n = newRNode()
l.nodes[t] = n
}
if n.next == nil {
n.next = newRLevel()
}
l = n.next
}
n.msg = buf
r.Unlock()
return nil
}
func (r *RetainList) Match(topic string) []*packets.PublishPacket {
tokens, err := SubscribeTopicCheckAndSpilt(topic)
if err != nil {
return nil
}
results := &RetainResult{}
r.Lock()
l := r.root
matchRLevel(l, tokens, results)
r.Unlock()
// log.Info("results: ", results)
return results.msg
}
func matchRLevel(l *rlevel, toks []string, results *RetainResult) {
var n *rnode
for i, t := range toks {
if l == nil {
return
}
// log.Info("l info :", l.nodes)
if t == "#" {
for _, n := range l.nodes {
n.GetAll(results)
}
}
if t == "+" {
for _, n := range l.nodes {
if len(t[i+1:]) == 0 {
results.msg = append(results.msg, n.msg)
} else {
matchRLevel(n.next, toks[i+1:], results)
}
}
}
n = l.nodes[t]
if n != nil {
l = n.next
} else {
l = nil
}
}
if n != nil {
results.msg = append(results.msg, n.msg)
}
}
func (r *rnode) GetAll(results *RetainResult) {
// log.Info("node 's message: ", string(r.msg))
if r.msg != nil {
results.msg = append(results.msg, r.msg)
}
l := r.next
for _, n := range l.nodes {
n.GetAll(results)
}
}

53
broker/sesson.go Normal file
View File

@@ -0,0 +1,53 @@
package broker
import "github.com/eclipse/paho.mqtt.golang/packets"
func (b *Broker) getSession(cli *client, req *packets.ConnectPacket, resp *packets.ConnackPacket) error {
// If CleanSession is set to 0, the server MUST resume communications with the
// client based on state from the current session, as identified by the client
// identifier. If there is no session associated with the client identifier the
// server must create a new session.
//
// If CleanSession is set to 1, the client and server must discard any previous
// session and start a new one. b session lasts as long as the network c
// onnection. State data associated with b session must not be reused in any
// subsequent session.
var err error
// Check to see if the client supplied an ID, if not, generate one and set
// clean session.
if len(req.ClientIdentifier) == 0 {
req.CleanSession = true
}
cid := req.ClientIdentifier
// If CleanSession is NOT set, check the session store for existing session.
// If found, return it.
if !req.CleanSession {
if cli.session, err = b.sessionMgr.Get(cid); err == nil {
resp.SessionPresent = true
if err := cli.session.Update(req); err != nil {
return err
}
}
}
// If CleanSession, or no existing session found, then create a new one
if cli.session == nil {
if cli.session, err = b.sessionMgr.New(cid); err != nil {
return err
}
resp.SessionPresent = false
if err := cli.session.Init(req); err != nil {
return err
}
}
return nil
}

View File

@@ -1,318 +0,0 @@
package broker
import (
"errors"
"sync"
log "github.com/cihub/seelog"
)
// A result structure better optimized for queue subs.
type SublistResult struct {
psubs []*subscription
qsubs []*subscription // don't make this a map, too expensive to iterate
}
// A Sublist stores and efficiently retrieves subscriptions.
type Sublist struct {
sync.RWMutex
cache map[string]*SublistResult
root *level
}
// A node contains subscriptions and a pointer to the next level.
type node struct {
next *level
psubs []*subscription
qsubs []*subscription
}
// A level represents a group of nodes and special pointers to
// wildcard nodes.
type level struct {
nodes map[string]*node
}
// Create a new default node.
func newNode() *node {
return &node{psubs: make([]*subscription, 0, 4), qsubs: make([]*subscription, 0, 4)}
}
// Create a new default level. We use FNV1A as the hash
// algortihm for the tokens, which should be short.
func newLevel() *level {
return &level{nodes: make(map[string]*node)}
}
// New will create a default sublist
func NewSublist() *Sublist {
return &Sublist{root: newLevel(), cache: make(map[string]*SublistResult)}
}
// Insert adds a subscription into the sublist
func (s *Sublist) Insert(sub *subscription) error {
tokens, err := SubscribeTopicCheckAndSpilt(sub.topic)
if err != nil {
return err
}
s.Lock()
l := s.root
var n *node
for _, t := range tokens {
n = l.nodes[t]
if n == nil {
n = newNode()
l.nodes[t] = n
}
if n.next == nil {
n.next = newLevel()
}
l = n.next
}
if sub.queue {
//check qsub is already exist
for i := range n.qsubs {
if equal(n.qsubs[i], sub) {
n.qsubs[i] = sub
return nil
}
}
n.qsubs = append(n.qsubs, sub)
} else {
//check psub is already exist
for i := range n.psubs {
if equal(n.psubs[i], sub) {
n.psubs[i] = sub
return nil
}
}
n.psubs = append(n.psubs, sub)
}
topic := string(sub.topic)
s.addToCache(topic, sub)
s.Unlock()
return nil
}
func (s *Sublist) addToCache(topic string, sub *subscription) {
for k, r := range s.cache {
if matchLiteral(k, topic) {
// Copy since others may have a reference.
nr := copyResult(r)
if sub.queue == false {
nr.psubs = append(nr.psubs, sub)
} else {
nr.qsubs = append(nr.qsubs, sub)
}
s.cache[k] = nr
}
}
}
func (s *Sublist) removeFromCache(topic string, sub *subscription) {
for k := range s.cache {
if !matchLiteral(k, topic) {
continue
}
// Since someone else may be referecing, can't modify the list
// safely, just let it re-populate.
delete(s.cache, k)
}
}
func matchLiteral(literal, topic string) bool {
tok, _ := SubscribeTopicCheckAndSpilt(topic)
li, _ := PublishTopicCheckAndSpilt(literal)
for i := 0; i < len(tok); i++ {
b := tok[i]
switch b {
case "+":
case "#":
return true
default:
if b != li[i] {
return false
}
}
}
return true
}
// Deep copy
func copyResult(r *SublistResult) *SublistResult {
nr := &SublistResult{}
nr.psubs = append([]*subscription(nil), r.psubs...)
nr.qsubs = append([]*subscription(nil), r.qsubs...)
return nr
}
func (s *Sublist) Remove(sub *subscription) error {
tokens, err := SubscribeTopicCheckAndSpilt(sub.topic)
if err != nil {
return err
}
s.Lock()
defer s.Unlock()
l := s.root
var n *node
for _, t := range tokens {
if l == nil {
return errors.New("No Matches subscription Found")
}
n = l.nodes[t]
if n != nil {
l = n.next
} else {
l = nil
}
}
if !s.removeFromNode(n, sub) {
return errors.New("No Matches subscription Found")
}
topic := string(sub.topic)
s.removeFromCache(topic, sub)
return nil
}
// removeFromNode removes sub from the appropriate subscriber list of n
// (queue or plain), reporting whether it was found. A nil node means the
// topic path did not exist, so there is nothing to remove.
// BUG FIX: the original had an unreachable trailing "return false"
// (flagged by go vet) after both branches had already returned.
func (s *Sublist) removeFromNode(n *node, sub *subscription) (found bool) {
	if n == nil {
		return false
	}
	if sub.queue {
		n.qsubs, found = removeSubFromList(sub, n.qsubs)
	} else {
		n.psubs, found = removeSubFromList(sub, n.psubs)
	}
	return found
}
// Match returns all subscriptions whose topic filters match the given
// literal topic. Results are served from the cache when possible; on a
// miss the subscription tree is walked and the result cached. The cache
// is bounded at 1024 entries by evicting one arbitrary entry per
// overflow. Returns nil when the topic fails validation.
func (s *Sublist) Match(topic string) *SublistResult {
	s.RLock()
	rc, ok := s.cache[topic]
	s.RUnlock()
	if ok {
		return rc
	}

	tokens, err := PublishTopicCheckAndSpilt(topic)
	if err != nil {
		log.Error("\tserver/sublist.go: ", err)
		return nil
	}

	result := &SublistResult{}
	s.Lock()
	l := s.root
	if len(tokens) > 0 {
		if tokens[0] == "/" {
			// A leading separator must be matched explicitly against the
			// wildcard nodes at the root level.
			if n, exist := l.nodes["#"]; exist {
				addNodeToResults(n, result)
			}
			if n, exist := l.nodes["+"]; exist {
				// BUG FIX: descend through the "+" node itself. The
				// original descended l.nodes["/"].next here, which both
				// matched the wrong subtree and panicked with a nil
				// dereference when no "/" node existed.
				matchLevel(n.next, tokens[1:], result)
			}
			if n, exist := l.nodes["/"]; exist {
				matchLevel(n.next, tokens[1:], result)
			}
		} else {
			matchLevel(s.root, tokens, result)
		}
	}
	s.cache[topic] = result
	if len(s.cache) > 1024 {
		// Evict one arbitrary entry to bound the cache size.
		for k := range s.cache {
			delete(s.cache, k)
			break
		}
	}
	s.Unlock()
	return result
}
// matchLevel recursively collects every subscription under l matching
// the remaining topic tokens, honoring the MQTT "#" (multi-level) and
// "+" (single-level) wildcards.
func matchLevel(l *level, toks []string, results *SublistResult) {
	var swc, n *node
	for i, t := range toks {
		if l == nil {
			return
		}
		// "#" at any level matches everything below it.
		if fwc, exist := l.nodes["#"]; exist {
			addNodeToResults(fwc, results)
		}
		if t != "/" {
			// Track this level's "+" node (nil when absent) so a stale
			// value from an earlier level cannot leak into the final
			// wildcard handling below; recurse beneath it for deeper
			// matches.
			swc = l.nodes["+"]
			if swc != nil {
				matchLevel(swc.next, toks[i+1:], results)
			}
		} else if plus, exist := l.nodes["+"]; exist {
			addNodeToResults(plus, results)
		}
		if n = l.nodes[t]; n != nil {
			l = n.next
		} else {
			l = nil
		}
	}
	if n != nil {
		addNodeToResults(n, results)
	}
	if swc != nil {
		// BUG FIX: add the single-level wildcard node's own subscribers.
		// The original passed n here, duplicating n's subscribers and
		// panicking on a nil node when only "+" matched the last level.
		addNodeToResults(swc, results)
	}
}
// addNodeToResults merges one node's subscribers (both the plain psubs
// and the queue qsubs lists) into the accumulated match results.
// n must be non-nil.
func addNodeToResults(n *node, results *SublistResult) {
	results.psubs = append(results.psubs, n.psubs...)
	results.qsubs = append(results.qsubs, n.qsubs...)
}
// removeSubFromList removes sub (compared by pointer identity) from sl
// using swap-with-last removal, returning the possibly shrunk slice and
// whether the subscription was present.
func removeSubFromList(sub *subscription, sl []*subscription) ([]*subscription, bool) {
	for idx := range sl {
		if sl[idx] != sub {
			continue
		}
		end := len(sl) - 1
		// Move the last element into the vacated slot and clear the old
		// tail slot so the GC can reclaim the subscription.
		sl[idx], sl[end] = sl[end], nil
		return shrinkAsNeeded(sl[:end]), true
	}
	return sl, false
}
// shrinkAsNeeded reallocates sl into a right-sized slice once more than
// half of a non-trivial capacity sits unused. This reclaims memory after
// very large growth followed by a return to normal size via unsubscribes.
func shrinkAsNeeded(sl []*subscription) []*subscription {
	length, capacity := len(sl), cap(sl)
	// Small backing arrays are not worth reallocating.
	if capacity <= 8 {
		return sl
	}
	if free := float32(capacity-length) / float32(capacity); free > 0.50 {
		compact := make([]*subscription, length)
		copy(compact, sl)
		return compact
	}
	return sl
}

24
broker/usage.go Normal file
View File

@@ -0,0 +1,24 @@
package broker
var usageStr = `
Usage: hmq [options]
Broker Options:
-w, --worker <number> Worker num to process message, perfer (client num)/10. (default 1024)
-p, --port <port> Use port for clients (default: 1883)
--host <host> Network host to listen on. (default "0.0.0.0")
-ws, --wsport <port> Use port for websocket monitoring
-wsp,--wspath <path> Use path for websocket monitoring
-c, --config <file> Configuration file
Logging Options:
-d, --debug <bool> Enable debugging output (default false)
-D Debug and trace
Cluster Options:
-r, --router <rurl> Router who maintenance cluster info
-cp, --clusterport <cluster-port> Cluster listen port for others
Common Options:
-h, --help Show this message
`

View File

@@ -1,37 +0,0 @@
package broker
type Worker struct {
WorkerPool chan chan *Message
MsgChannel chan *Message
quit chan bool
}
func NewWorker(workerPool chan chan *Message) Worker {
return Worker{
WorkerPool: workerPool,
MsgChannel: make(chan *Message),
quit: make(chan bool)}
}
func (w Worker) Start() {
go func() {
for {
// register the current worker into the worker queue.
w.WorkerPool <- w.MsgChannel
select {
case msg := <-w.MsgChannel:
// we have received a work request.
ProcessMessage(msg)
case <-w.quit:
return
}
}
}()
}
// Stop signals the worker to stop listening for work requests.
func (w Worker) Stop() {
go func() {
w.quit <- true
}()
}

View File

@@ -4,9 +4,9 @@
"host": "0.0.0.0",
"cluster": {
"host": "0.0.0.0",
"port": "1993",
"routes": []
"port": "1993"
},
"router": "127.0.0.1:9888",
"tlsPort": "8883",
"tlsHost": "0.0.0.0",
"wsPort": "1888",

View File

@@ -1,3 +1,5 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package acl
import (

View File

@@ -1,3 +1,4 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>*/
package acl
import "strings"

View File

@@ -1,3 +1,5 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package acl
import (

View File

@@ -0,0 +1,76 @@
// Copyright (c) 2014 The SurgeMQ Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sessions
import (
"fmt"
"sync"
)
var _ SessionsProvider = (*memProvider)(nil)
func init() {
Register("mem", NewMemProvider())
}
type memProvider struct {
st map[string]*Session
mu sync.RWMutex
}
func NewMemProvider() *memProvider {
return &memProvider{
st: make(map[string]*Session),
}
}
func (this *memProvider) New(id string) (*Session, error) {
this.mu.Lock()
defer this.mu.Unlock()
this.st[id] = &Session{id: id}
return this.st[id], nil
}
func (this *memProvider) Get(id string) (*Session, error) {
this.mu.RLock()
defer this.mu.RUnlock()
sess, ok := this.st[id]
if !ok {
return nil, fmt.Errorf("store/Get: No session found for key %s", id)
}
return sess, nil
}
func (this *memProvider) Del(id string) {
this.mu.Lock()
defer this.mu.Unlock()
delete(this.st, id)
}
func (this *memProvider) Save(id string) error {
return nil
}
// Count returns the number of sessions currently stored.
// BUG FIX: the original read the map without holding the lock, which is
// a data race against concurrent New/Del; take the read lock like the
// other accessors in this type.
func (this *memProvider) Count() int {
	this.mu.RLock()
	defer this.mu.RUnlock()
	return len(this.st)
}
func (this *memProvider) Close() error {
this.st = make(map[string]*Session)
return nil
}

View File

@@ -0,0 +1,95 @@
package sessions
import (
"time"
log "github.com/cihub/seelog"
"github.com/go-redis/redis"
jsoniter "github.com/json-iterator/go"
)
var redisClient *redis.Client
var _ SessionsProvider = (*redisProvider)(nil)
const (
sessionName = "session"
)
type redisProvider struct {
}
func init() {
Register("redis", NewRedisProvider())
}
// InitRedisConn creates the package-level redis client for the given
// address and blocks until a ping succeeds, retrying every 3 seconds.
// BUG FIX: the original ignored the url parameter entirely and always
// dialed the hardcoded localhost address; the parameter is now honored,
// with localhost kept as the default for an empty value.
func InitRedisConn(url string) {
	if url == "" {
		url = "127.0.0.1:6379"
	}
	redisClient = redis.NewClient(&redis.Options{
		Addr:     url,
		Password: "", // no password set
		DB:       0,  // use default DB
	})
	err := redisClient.Ping().Err()
	for err != nil {
		log.Error("connect redis error: ", err, " 3s try again...")
		time.Sleep(3 * time.Second)
		err = redisClient.Ping().Err()
	}
}
func NewRedisProvider() *redisProvider {
return &redisProvider{}
}
func (r *redisProvider) New(id string) (*Session, error) {
val, _ := jsoniter.Marshal(&Session{id: id})
err := redisClient.HSet(sessionName, id, val).Err()
if err != nil {
return nil, err
}
result, err := redisClient.HGet(sessionName, id).Bytes()
if err != nil {
return nil, err
}
sess := Session{}
err = jsoniter.Unmarshal(result, &sess)
if err != nil {
return nil, err
}
return &sess, nil
}
func (r *redisProvider) Get(id string) (*Session, error) {
result, err := redisClient.HGet(sessionName, id).Bytes()
if err != nil {
return nil, err
}
sess := Session{}
err = jsoniter.Unmarshal(result, &sess)
if err != nil {
return nil, err
}
return &sess, nil
}
func (r *redisProvider) Del(id string) {
redisClient.HDel(sessionName, id)
}
func (r *redisProvider) Save(id string) error {
return nil
}
func (r *redisProvider) Count() int {
return int(redisClient.HLen(sessionName).Val())
}
func (r *redisProvider) Close() error {
return redisClient.Del(sessionName).Err()
}

149
lib/sessions/session.go Normal file
View File

@@ -0,0 +1,149 @@
package sessions
import (
"fmt"
"sync"
"github.com/eclipse/paho.mqtt.golang/packets"
)
const (
// Queue size for the ack queue
defaultQueueSize = 16
)
type Session struct {
// cmsg is the CONNECT message
cmsg *packets.ConnectPacket
// Will message to publish if connect is closed unexpectedly
Will *packets.PublishPacket
// Retained publish message
Retained *packets.PublishPacket
// topics stores all the topis for this session/client
topics map[string]byte
// Initialized?
initted bool
// Serialize access to this session
mu sync.Mutex
id string
}
func (this *Session) Init(msg *packets.ConnectPacket) error {
this.mu.Lock()
defer this.mu.Unlock()
if this.initted {
return fmt.Errorf("Session already initialized")
}
this.cmsg = msg
if this.cmsg.WillFlag {
this.Will = packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
this.Will.Qos = this.cmsg.Qos
this.Will.TopicName = this.cmsg.WillTopic
this.Will.Payload = this.cmsg.WillMessage
this.Will.Retain = this.cmsg.WillRetain
}
this.topics = make(map[string]byte, 1)
this.id = string(msg.ClientIdentifier)
this.initted = true
return nil
}
func (this *Session) Update(msg *packets.ConnectPacket) error {
this.mu.Lock()
defer this.mu.Unlock()
this.cmsg = msg
return nil
}
func (this *Session) RetainMessage(msg *packets.PublishPacket) error {
this.mu.Lock()
defer this.mu.Unlock()
this.Retained = msg
return nil
}
func (this *Session) AddTopic(topic string, qos byte) error {
this.mu.Lock()
defer this.mu.Unlock()
if !this.initted {
return fmt.Errorf("Session not yet initialized")
}
this.topics[topic] = qos
return nil
}
func (this *Session) RemoveTopic(topic string) error {
this.mu.Lock()
defer this.mu.Unlock()
if !this.initted {
return fmt.Errorf("Session not yet initialized")
}
delete(this.topics, topic)
return nil
}
func (this *Session) Topics() ([]string, []byte, error) {
this.mu.Lock()
defer this.mu.Unlock()
if !this.initted {
return nil, nil, fmt.Errorf("Session not yet initialized")
}
var (
topics []string
qoss []byte
)
for k, v := range this.topics {
topics = append(topics, k)
qoss = append(qoss, v)
}
return topics, qoss, nil
}
func (this *Session) ID() string {
return this.cmsg.ClientIdentifier
}
func (this *Session) WillFlag() bool {
this.mu.Lock()
defer this.mu.Unlock()
return this.cmsg.WillFlag
}
func (this *Session) SetWillFlag(v bool) {
this.mu.Lock()
defer this.mu.Unlock()
this.cmsg.WillFlag = v
}
func (this *Session) CleanSession() bool {
this.mu.Lock()
defer this.mu.Unlock()
return this.cmsg.CleanSession
}

92
lib/sessions/sessions.go Normal file
View File

@@ -0,0 +1,92 @@
package sessions
import (
"crypto/rand"
"encoding/base64"
"errors"
"fmt"
"io"
)
var (
ErrSessionsProviderNotFound = errors.New("Session: Session provider not found")
ErrKeyNotAvailable = errors.New("Session: not item found for key.")
providers = make(map[string]SessionsProvider)
)
type SessionsProvider interface {
New(id string) (*Session, error)
Get(id string) (*Session, error)
Del(id string)
Save(id string) error
Count() int
Close() error
}
// Register makes a session provider available by the provided name.
// If a Register is called twice with the same name or if the driver is nil,
// it panics.
func Register(name string, provider SessionsProvider) {
if provider == nil {
panic("session: Register provide is nil")
}
if _, dup := providers[name]; dup {
panic("session: Register called twice for provider " + name)
}
providers[name] = provider
}
func Unregister(name string) {
delete(providers, name)
}
type Manager struct {
p SessionsProvider
}
func NewManager(providerName string) (*Manager, error) {
p, ok := providers[providerName]
if !ok {
return nil, fmt.Errorf("session: unknown provider %q", providerName)
}
return &Manager{p: p}, nil
}
func (this *Manager) New(id string) (*Session, error) {
if id == "" {
id = this.sessionId()
}
return this.p.New(id)
}
func (this *Manager) Get(id string) (*Session, error) {
return this.p.Get(id)
}
func (this *Manager) Del(id string) {
this.p.Del(id)
}
func (this *Manager) Save(id string) error {
return this.p.Save(id)
}
func (this *Manager) Count() int {
return this.p.Count()
}
func (this *Manager) Close() error {
return this.p.Close()
}
func (manager *Manager) sessionId() string {
b := make([]byte, 15)
if _, err := io.ReadFull(rand.Reader, b); err != nil {
return ""
}
return base64.URLEncoding.EncodeToString(b)
}

549
lib/topics/memtopics.go Normal file
View File

@@ -0,0 +1,549 @@
package topics
import (
"fmt"
"reflect"
"sync"
"github.com/eclipse/paho.mqtt.golang/packets"
)
const (
QosAtMostOnce byte = iota
QosAtLeastOnce
QosExactlyOnce
QosFailure = 0x80
)
var _ TopicsProvider = (*memTopics)(nil)
type memTopics struct {
// Sub/unsub mutex
smu sync.RWMutex
// Subscription tree
sroot *snode
// Retained message mutex
rmu sync.RWMutex
// Retained messages topic tree
rroot *rnode
}
func init() {
Register("mem", NewMemProvider())
}
// NewMemProvider returns an new instance of the memTopics, which is implements the
// TopicsProvider interface. memProvider is a hidden struct that stores the topic
// subscriptions and retained messages in memory. The content is not persisted, so
// when the server goes, everything will be gone. Use with care.
func NewMemProvider() *memTopics {
return &memTopics{
sroot: newSNode(),
rroot: newRNode(),
}
}
func ValidQos(qos byte) bool {
return qos == QosAtMostOnce || qos == QosAtLeastOnce || qos == QosExactlyOnce
}
func (this *memTopics) Subscribe(topic []byte, qos byte, sub interface{}) (byte, error) {
if !ValidQos(qos) {
return QosFailure, fmt.Errorf("Invalid QoS %d", qos)
}
if sub == nil {
return QosFailure, fmt.Errorf("Subscriber cannot be nil")
}
this.smu.Lock()
defer this.smu.Unlock()
if qos > QosExactlyOnce {
qos = QosExactlyOnce
}
if err := this.sroot.sinsert(topic, qos, sub); err != nil {
return QosFailure, err
}
return qos, nil
}
func (this *memTopics) Unsubscribe(topic []byte, sub interface{}) error {
this.smu.Lock()
defer this.smu.Unlock()
return this.sroot.sremove(topic, sub)
}
// Returned values will be invalidated by the next Subscribers call
func (this *memTopics) Subscribers(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error {
if !ValidQos(qos) {
return fmt.Errorf("Invalid QoS %d", qos)
}
this.smu.RLock()
defer this.smu.RUnlock()
*subs = (*subs)[0:0]
*qoss = (*qoss)[0:0]
return this.sroot.smatch(topic, qos, subs, qoss)
}
func (this *memTopics) Retain(msg *packets.PublishPacket) error {
this.rmu.Lock()
defer this.rmu.Unlock()
// So apparently, at least according to the MQTT Conformance/Interoperability
// Testing, that a payload of 0 means delete the retain message.
// https://eclipse.org/paho/clients/testing/
if len(msg.Payload) == 0 {
return this.rroot.rremove([]byte(msg.TopicName))
}
return this.rroot.rinsert([]byte(msg.TopicName), msg)
}
func (this *memTopics) Retained(topic []byte, msgs *[]*packets.PublishPacket) error {
this.rmu.RLock()
defer this.rmu.RUnlock()
return this.rroot.rmatch(topic, msgs)
}
func (this *memTopics) Close() error {
this.sroot = nil
this.rroot = nil
return nil
}
// subscrition nodes
type snode struct {
// If this is the end of the topic string, then add subscribers here
subs []interface{}
qos []byte
// Otherwise add the next topic level here
snodes map[string]*snode
}
func newSNode() *snode {
return &snode{
snodes: make(map[string]*snode),
}
}
func (this *snode) sinsert(topic []byte, qos byte, sub interface{}) error {
// If there's no more topic levels, that means we are at the matching snode
// to insert the subscriber. So let's see if there's such subscriber,
// if so, update it. Otherwise insert it.
if len(topic) == 0 {
// Let's see if the subscriber is already on the list. If yes, update
// QoS and then return.
for i := range this.subs {
if equal(this.subs[i], sub) {
this.qos[i] = qos
return nil
}
}
// Otherwise add.
this.subs = append(this.subs, sub)
this.qos = append(this.qos, qos)
return nil
}
// Not the last level, so let's find or create the next level snode, and
// recursively call it's insert().
// ntl = next topic level
ntl, rem, err := nextTopicLevel(topic)
if err != nil {
return err
}
level := string(ntl)
// Add snode if it doesn't already exist
n, ok := this.snodes[level]
if !ok {
n = newSNode()
this.snodes[level] = n
}
return n.sinsert(rem, qos, sub)
}
// This remove implementation ignores the QoS, as long as the subscriber
// matches then it's removed
func (this *snode) sremove(topic []byte, sub interface{}) error {
// If the topic is empty, it means we are at the final matching snode. If so,
// let's find the matching subscribers and remove them.
if len(topic) == 0 {
// If subscriber == nil, then it's signal to remove ALL subscribers
if sub == nil {
this.subs = this.subs[0:0]
this.qos = this.qos[0:0]
return nil
}
// If we find the subscriber then remove it from the list. Technically
// we just overwrite the slot by shifting all other items up by one.
for i := range this.subs {
if equal(this.subs[i], sub) {
this.subs = append(this.subs[:i], this.subs[i+1:]...)
this.qos = append(this.qos[:i], this.qos[i+1:]...)
return nil
}
}
return fmt.Errorf("No topic found for subscriber")
}
// Not the last level, so let's find the next level snode, and recursively
// call it's remove().
// ntl = next topic level
ntl, rem, err := nextTopicLevel(topic)
if err != nil {
return err
}
level := string(ntl)
// Find the snode that matches the topic level
n, ok := this.snodes[level]
if !ok {
return fmt.Errorf("No topic found")
}
// Remove the subscriber from the next level snode
if err := n.sremove(rem, sub); err != nil {
return err
}
// If there are no more subscribers and snodes to the next level we just visited
// let's remove it
if len(n.subs) == 0 && len(n.snodes) == 0 {
delete(this.snodes, level)
}
return nil
}
// smatch() returns all the subscribers that are subscribed to the topic. Given a topic
// with no wildcards (publish topic), it returns a list of subscribers that subscribes
// to the topic. For each of the level names, it's a match
// - if there are subscribers to '#', then all the subscribers are added to result set
func (this *snode) smatch(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error {
// If the topic is empty, it means we are at the final matching snode. If so,
// let's find the subscribers that match the qos and append them to the list.
if len(topic) == 0 {
this.matchQos(qos, subs, qoss)
return nil
}
// ntl = next topic level
ntl, rem, err := nextTopicLevel(topic)
if err != nil {
return err
}
level := string(ntl)
for k, n := range this.snodes {
// If the key is "#", then these subscribers are added to the result set
if k == MWC {
n.matchQos(qos, subs, qoss)
} else if k == SWC || k == level {
if err := n.smatch(rem, qos, subs, qoss); err != nil {
return err
}
}
}
return nil
}
// retained message nodes
type rnode struct {
// If this is the end of the topic string, then add retained messages here
msg *packets.PublishPacket
// Otherwise add the next topic level here
rnodes map[string]*rnode
}
func newRNode() *rnode {
return &rnode{
rnodes: make(map[string]*rnode),
}
}
func (this *rnode) rinsert(topic []byte, msg *packets.PublishPacket) error {
// If there's no more topic levels, that means we are at the matching rnode.
if len(topic) == 0 {
// Reuse the message if possible
if this.msg == nil {
this.msg = msg
}
return nil
}
// Not the last level, so let's find or create the next level snode, and
// recursively call it's insert().
// ntl = next topic level
ntl, rem, err := nextTopicLevel(topic)
if err != nil {
return err
}
level := string(ntl)
// Add snode if it doesn't already exist
n, ok := this.rnodes[level]
if !ok {
n = newRNode()
this.rnodes[level] = n
}
return n.rinsert(rem, msg)
}
// Remove the retained message for the supplied topic
func (this *rnode) rremove(topic []byte) error {
// If the topic is empty, it means we are at the final matching rnode. If so,
// let's remove the buffer and message.
if len(topic) == 0 {
this.msg = nil
return nil
}
// Not the last level, so let's find the next level rnode, and recursively
// call it's remove().
// ntl = next topic level
ntl, rem, err := nextTopicLevel(topic)
if err != nil {
return err
}
level := string(ntl)
// Find the rnode that matches the topic level
n, ok := this.rnodes[level]
if !ok {
return fmt.Errorf("No topic found")
}
// Remove the subscriber from the next level rnode
if err := n.rremove(rem); err != nil {
return err
}
// If there are no more rnodes to the next level we just visited let's remove it
if len(n.rnodes) == 0 {
delete(this.rnodes, level)
}
return nil
}
// rmatch() finds the retained messages for the topic and qos provided. It's somewhat
// of a reverse match compare to match() since the supplied topic can contain
// wildcards, whereas the retained message topic is a full (no wildcard) topic.
func (this *rnode) rmatch(topic []byte, msgs *[]*packets.PublishPacket) error {
// If the topic is empty, it means we are at the final matching rnode. If so,
// add the retained msg to the list.
if len(topic) == 0 {
if this.msg != nil {
*msgs = append(*msgs, this.msg)
}
return nil
}
// ntl = next topic level
ntl, rem, err := nextTopicLevel(topic)
if err != nil {
return err
}
level := string(ntl)
if level == MWC {
// If '#', add all retained messages starting this node
this.allRetained(msgs)
} else if level == SWC {
// If '+', check all nodes at this level. Next levels must be matched.
for _, n := range this.rnodes {
if err := n.rmatch(rem, msgs); err != nil {
return err
}
}
} else {
// Otherwise, find the matching node, go to the next level
if n, ok := this.rnodes[level]; ok {
if err := n.rmatch(rem, msgs); err != nil {
return err
}
}
}
return nil
}
func (this *rnode) allRetained(msgs *[]*packets.PublishPacket) {
if this.msg != nil {
*msgs = append(*msgs, this.msg)
}
for _, n := range this.rnodes {
n.allRetained(msgs)
}
}
const (
stateCHR byte = iota // Regular character
stateMWC // Multi-level wildcard
stateSWC // Single-level wildcard
stateSEP // Topic level separator
stateSYS // System level topic ($)
)
// Returns topic level, remaining topic levels and any errors
func nextTopicLevel(topic []byte) ([]byte, []byte, error) {
s := stateCHR
for i, c := range topic {
switch c {
case '/':
if s == stateMWC {
return nil, nil, fmt.Errorf("Multi-level wildcard found in topic and it's not at the last level")
}
if i == 0 {
return []byte(SWC), topic[i+1:], nil
}
return topic[:i], topic[i+1:], nil
case '#':
if i != 0 {
return nil, nil, fmt.Errorf("Wildcard character '#' must occupy entire topic level")
}
s = stateMWC
case '+':
if i != 0 {
return nil, nil, fmt.Errorf("Wildcard character '+' must occupy entire topic level")
}
s = stateSWC
// case '$':
// if i == 0 {
// return nil, nil, fmt.Errorf("Cannot publish to $ topics")
// }
// s = stateSYS
default:
if s == stateMWC || s == stateSWC {
return nil, nil, fmt.Errorf("Wildcard characters '#' and '+' must occupy entire topic level")
}
s = stateCHR
}
}
// If we got here that means we didn't hit the separator along the way, so the
// topic is either empty, or does not contain a separator. Either way, we return
// the full topic
return topic, nil, nil
}
// The QoS of the payload messages sent in response to a subscription must be the
// minimum of the QoS of the originally published message (in this case, it's the
// qos parameter) and the maximum QoS granted by the server (in this case, it's
// the QoS in the topic tree).
//
// It's also possible that even if the topic matches, the subscriber is not included
// due to the QoS granted is lower than the published message QoS. For example,
// if the client is granted only QoS 0, and the publish message is QoS 1, then this
// client is not to be send the published message.
func (this *snode) matchQos(qos byte, subs *[]interface{}, qoss *[]byte) {
for _, sub := range this.subs {
// If the published QoS is higher than the subscriber QoS, then we skip the
// subscriber. Otherwise, add to the list.
// if qos >= this.qos[i] {
*subs = append(*subs, sub)
*qoss = append(*qoss, qos)
// }
}
}
// equal reports whether two arbitrary subscriber keys are equal. Values
// of different dynamic types are never equal. Functions are compared by
// their code pointers, since Go function values do not support ==.
//
// BUG FIX: the original compared the addresses of the two local
// parameters (&k1 == &k2), which is always false, so function
// subscribers could never match for update or removal. The trailing
// per-type switch was also redundant: once the dynamic types match, the
// interface comparison k1 == k2 already compares the underlying values.
func equal(k1, k2 interface{}) bool {
	if reflect.TypeOf(k1) != reflect.TypeOf(k2) {
		return false
	}
	if reflect.ValueOf(k1).Kind() == reflect.Func {
		return reflect.ValueOf(k1).Pointer() == reflect.ValueOf(k2).Pointer()
	}
	return k1 == k2
}

91
lib/topics/topics.go Normal file
View File

@@ -0,0 +1,91 @@
package topics
import (
"fmt"
"github.com/eclipse/paho.mqtt.golang/packets"
)
const (
// MWC is the multi-level wildcard
MWC = "#"
// SWC is the single level wildcard
SWC = "+"
// SEP is the topic level separator
SEP = "/"
// SYS is the starting character of the system level topics
SYS = "$"
// Both wildcards
_WC = "#+"
)
var (
providers = make(map[string]TopicsProvider)
)
// TopicsProvider
type TopicsProvider interface {
Subscribe(topic []byte, qos byte, subscriber interface{}) (byte, error)
Unsubscribe(topic []byte, subscriber interface{}) error
Subscribers(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error
Retain(msg *packets.PublishPacket) error
Retained(topic []byte, msgs *[]*packets.PublishPacket) error
Close() error
}
func Register(name string, provider TopicsProvider) {
if provider == nil {
panic("topics: Register provide is nil")
}
if _, dup := providers[name]; dup {
panic("topics: Register called twice for provider " + name)
}
providers[name] = provider
}
func Unregister(name string) {
delete(providers, name)
}
type Manager struct {
p TopicsProvider
}
func NewManager(providerName string) (*Manager, error) {
p, ok := providers[providerName]
if !ok {
return nil, fmt.Errorf("session: unknown provider %q", providerName)
}
return &Manager{p: p}, nil
}
func (this *Manager) Subscribe(topic []byte, qos byte, subscriber interface{}) (byte, error) {
return this.p.Subscribe(topic, qos, subscriber)
}
func (this *Manager) Unsubscribe(topic []byte, subscriber interface{}) error {
return this.p.Unsubscribe(topic, subscriber)
}
func (this *Manager) Subscribers(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error {
return this.p.Subscribers(topic, qos, subs, qoss)
}
func (this *Manager) Retain(msg *packets.PublishPacket) error {
return this.p.Retain(msg)
}
func (this *Manager) Retained(topic []byte, msgs *[]*packets.PublishPacket) error {
return this.p.Retained(topic, msgs)
}
func (this *Manager) Close() error {
return this.p.Close()
}

50
logger/logger.go Normal file
View File

@@ -0,0 +1,50 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package logger
import (
"go.uber.org/zap"
)
var (
// env can be setup at build time with Go Linker. Value could be prod or whatever else for dev env
instance *zap.Logger
logCfg zap.Config
)
// NewDevLogger return a logger for dev builds
func NewDevLogger() (*zap.Logger, error) {
logCfg := zap.NewDevelopmentConfig()
return logCfg.Build()
}
// NewProdLogger return a logger for production builds
func NewProdLogger() (*zap.Logger, error) {
logCfg := zap.NewProductionConfig()
logCfg.DisableStacktrace = true
logCfg.Level = zap.NewAtomicLevelAt(zap.InfoLevel)
return logCfg.Build()
}
func InitLogger(debug bool) {
var err error
var log *zap.Logger
if debug {
log, err = NewDevLogger()
} else {
log, err = NewProdLogger()
}
if err != nil {
panic("Unable to create a logger.")
}
defer log.Sync()
log.Debug("Logger initialization succeeded")
instance = log.Named("hmq")
}
// Get return a *zap.Logger instance
func Get() *zap.Logger {
return instance
}

32
logger/logger_test.go Normal file
View File

@@ -0,0 +1,32 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package logger
import (
"testing"
"github.com/stretchr/testify/assert"
"go.uber.org/zap"
)
func TestGet(t *testing.T) {
var l *zap.Logger
logger := Get()
assert.NotNil(t, logger)
assert.IsType(t, l, logger)
}
func TestNewDevLogger(t *testing.T) {
logger, err := NewDevLogger()
assert.Nil(t, err)
assert.True(t, logger.Core().Enabled(zap.DebugLevel))
}
func TestNewProdLogger(t *testing.T) {
logger, err := NewProdLogger()
assert.Nil(t, err)
assert.False(t, logger.Core().Enabled(zap.DebugLevel))
}

39
main.go
View File

@@ -1,50 +1,37 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
*/
package main
import (
"hmq/broker"
"fmt"
"os"
"os/signal"
"runtime"
log "github.com/cihub/seelog"
"github.com/fhmq/hmq/broker"
)
func init() {
testConfig := `
<seelog type="sync">
<outputs formatid="main">
<console/>
</outputs>
<formats>
<format id="main" format="Time:%Date %Time%tfile:%File%tlevel:%LEVEL%t%Msg%n"/>
</formats>
</seelog>`
logger, err := log.LoggerFromConfigAsBytes([]byte(testConfig))
if err != nil {
panic(err)
}
log.ReplaceLogger(logger)
}
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
config, er := broker.LoadConfig()
if er != nil {
log.Error("Load Config file error: ", er)
config, err := broker.ConfigureConfig(os.Args[1:])
if err != nil {
fmt.Println("configure broker config error: ", err)
return
}
broker.StartDispatcher()
b, err := broker.NewBroker(config)
if err != nil {
log.Error("New Broker error: ", er)
fmt.Println("New Broker error: ", err)
return
}
b.Start()
s := waitForSignal()
log.Infof("signal got: %v ,broker closed.", s)
fmt.Println("signal received, broker closed.", s)
}
func waitForSignal() os.Signal {

166
pool/pool.go Normal file
View File

@@ -0,0 +1,166 @@
package pool
import "time"
const (
// This value is the size of the queue that workers register their
// availability to the dispatcher. There may be hundreds of workers, but
// only a small channel is needed to register some of the workers.
readyQueueSize = 16
// If worker pool receives no new work for this period of time, then stop
// a worker goroutine.
idleTimeoutSec = 5
)
type WorkerPool struct {
maxWorkers int
timeout time.Duration
taskQueue chan func()
readyWorkers chan chan func()
stoppedChan chan struct{}
}
func New(maxWorkers int) *WorkerPool {
// There must be at least one worker.
if maxWorkers < 1 {
maxWorkers = 1
}
// taskQueue is unbuffered since items are always removed immediately.
pool := &WorkerPool{
taskQueue: make(chan func()),
maxWorkers: maxWorkers,
readyWorkers: make(chan chan func(), readyQueueSize),
timeout: time.Second * idleTimeoutSec,
stoppedChan: make(chan struct{}),
}
// Start the task dispatcher.
go pool.dispatch()
return pool
}
func (p *WorkerPool) Stop() {
if p.Stopped() {
return
}
close(p.taskQueue)
<-p.stoppedChan
}
func (p *WorkerPool) Stopped() bool {
select {
case <-p.stoppedChan:
return true
default:
}
return false
}
func (p *WorkerPool) Submit(task func()) {
if task != nil {
p.taskQueue <- task
}
}
func (p *WorkerPool) SubmitWait(task func()) {
if task == nil {
return
}
doneChan := make(chan struct{})
p.taskQueue <- func() {
task()
close(doneChan)
}
<-doneChan
}
func (p *WorkerPool) dispatch() {
defer close(p.stoppedChan)
timeout := time.NewTimer(p.timeout)
var workerCount int
var task func()
var ok bool
var workerTaskChan chan func()
startReady := make(chan chan func())
Loop:
for {
timeout.Reset(p.timeout)
select {
case task, ok = <-p.taskQueue:
if !ok {
break Loop
}
// Got a task to do.
select {
case workerTaskChan = <-p.readyWorkers:
// A worker is ready, so give task to worker.
workerTaskChan <- task
default:
// No workers ready.
// Create a new worker, if not at max.
if workerCount < p.maxWorkers {
workerCount++
go func(t func()) {
startWorker(startReady, p.readyWorkers)
// Submit the task when the new worker.
taskChan := <-startReady
taskChan <- t
}(task)
} else {
// Start a goroutine to submit the task when an existing
// worker is ready.
go func(t func()) {
taskChan := <-p.readyWorkers
taskChan <- t
}(task)
}
}
case <-timeout.C:
// Timed out waiting for work to arrive. Kill a ready worker.
if workerCount > 0 {
select {
case workerTaskChan = <-p.readyWorkers:
// A worker is ready, so kill.
close(workerTaskChan)
workerCount--
default:
// No work, but no ready workers. All workers are busy.
}
}
}
}
// Stop all remaining workers as they become ready.
for workerCount > 0 {
workerTaskChan = <-p.readyWorkers
close(workerTaskChan)
workerCount--
}
}
// startWorker spawns a worker goroutine that receives tasks over its own
// task channel. The channel is announced exactly once on startReady (so
// the dispatcher can hand over the first task) and then re-announced on
// readyWorkers after each completed task. The dispatcher signals the
// worker to exit by closing its task channel.
func startWorker(startReady, readyWorkers chan chan func()) {
	go func() {
		taskChan := make(chan func())
		var task func()
		var ok bool
		// Register availability on startReady channel.
		startReady <- taskChan
		for {
			// Read task from dispatcher.
			task, ok = <-taskChan
			if !ok {
				// Dispatcher has told worker to stop.
				break
			}
			// Execute the task.
			task()
			// Register availability on readyWorkers channel.
			readyWorkers <- taskChan
		}
	}()
}