57 Commits

Author SHA1 Message Date
joy.zhou
0d69f13e48 Revert "op: low performance code (#102)"
This reverts commit c2248bed2b.
2021-01-07 16:22:51 +08:00
c2248bed2b op: low performance code (#102)
thanks
2021-01-07 14:12:28 +08:00
turtletramp
6be79cbe88 Bugfix - authfile plugin did wrongly use username as IP and IP as username in ACL checks (#100)
* adding test + fix issue with wrong order in acl check

* reduce to featureset from original fork
2020-12-02 10:05:46 +08:00
sngyai
6cb307d252 Feature qos1&qos2 (#99)
* client publish qos2

* server dispatch qos1&qos2

* Use at most one timer for each client

* Use at most one timer for each client
2020-11-30 11:34:03 +08:00
joy,zhou
b8bacb4c3d fixed bug #96 2020-08-26 17:24:22 +08:00
chujiangke
481a61c520 fix (#90) 2020-06-24 15:14:25 +08:00
Rajiv Shah
4782f76048 Replace satori/go.uuid with google/uuid (#89) 2020-06-09 10:13:37 +08:00
Aleksey Myasnikov
1a374f9734 Update comm.go (#85) 2020-05-08 11:26:44 +08:00
janson
3f60d23e85 fix fail in cluster deploy (#86)
Co-authored-by: janson <janson@gmail.com>
2020-05-08 11:26:26 +08:00
yu
3cf90d5231 add websocket client ip 2020-04-16 14:08:51 +08:00
gerdstolpmann
a1bf3d93b2 only set a read deadline when the keep-alive value is positive (#83) 2020-04-16 10:33:17 +08:00
gerdstolpmann
af7db83bdc do not try to set remoteIP for websocket connections (#81) 2020-04-04 10:41:36 +08:00
gerdstolpmann
839041e912 do not expect "Origin" header for websocket connections (#80)
* websocket: do not check the presence of the "Origin" header

* avoid using http.DefaultServeMux
2020-04-04 10:40:12 +08:00
gerdstolpmann
17dac26996 if used as library, allow that the auth and bridge plugins can be set by (#79)
struct, and not only by name
2020-04-03 14:49:50 +08:00
joy.zhou
55f1f1aa80 Update deploy.yaml 2020-01-19 11:19:21 +08:00
joy.zhou
ccb7c37b96 Update svc.yaml 2020-01-19 11:18:44 +08:00
joy.zhou
7e29cc7213 Update svc.yaml 2020-01-19 11:18:38 +08:00
winglq
1971b5c324 update retained message even if it's already there (#70)
Signed-off-by: Liu Qing <winglq@gmail.com>
2020-01-06 11:22:59 +08:00
foosinn
fb453e8c0f fix ipv6 addresses (#68) 2019-12-30 13:42:31 +08:00
joy.zhou
eef900ad2f Update comm.go 2019-12-25 17:14:44 +08:00
joy.zhou
d24e0dac13 Update info.go 2019-12-25 17:14:11 +08:00
joy.zhou
fd0622710b Update client.go 2019-12-25 17:13:44 +08:00
joy.zhou
73dd5bb376 Update config.go 2019-12-25 17:13:16 +08:00
joy.zhou
474c557c7a Update sesson.go 2019-12-25 17:12:59 +08:00
joy.zhou
f3e7e5481a Update auth.go 2019-12-25 17:12:30 +08:00
joy.zhou
57fce9c7dc Update broker.go 2019-12-25 17:12:07 +08:00
joy.zhou
995898c5f4 Update main.go 2019-12-25 17:10:32 +08:00
joy.zhou
2404693bd2 fix issue #66 2019-12-12 15:07:12 +08:00
joy.zhou
68cd5e94a4 Merge branch 'master' of https://github.com/fhmq/hmq 2019-11-14 11:09:52 +08:00
joy.zhou
44fa819f62 update some logic 2019-11-14 11:09:15 +08:00
joy.zhou
2b7bb3fcd5 Update README.md 2019-11-11 21:08:21 +08:00
joy.zhou
4c107c67ab fix bug (#63)
* update

* update auth file

* fixbug
2019-11-11 11:41:38 +08:00
joy.zhou
896769fd9d Add acl (#61)
* update

* update auth file
2019-10-30 14:44:18 +08:00
joy.zhou
c7a51fe68f fixed 2019-09-30 11:06:05 +08:00
joy.zhou
a3fc611615 fix issue 2019-09-30 11:04:46 +08:00
H.K
e74b9facd1 fix: (#57)
topics used but not make
2019-09-30 10:50:40 +08:00
joy.zhou
53a79caad9 update deploy 2019-09-18 14:17:19 +08:00
joy.zhou
55576c1eb3 update kafka plugins 2019-09-18 14:00:19 +08:00
joy.zhou
80b64b147e delete acl file 2019-08-23 16:40:39 +08:00
joy.zhou
ea055d5929 update authhttp 2019-08-23 16:22:59 +08:00
joy.zhou
8d8707801f REMOVE NO USE 2019-08-20 10:27:15 +08:00
joy.zhou
fd2974a546 update Readme 2019-08-19 10:57:29 +08:00
joy.zhou
72211efedf Merge branches 'plugin_update' and 'master' of https://github.com/fhmq/hmq 2019-08-19 10:48:55 +08:00
joy.zhou
7e15da209e Plugin update (#48)
* replace plugin

* update plugin
2019-08-19 10:35:17 +08:00
joy.zhou
69a26f8cd9 update plugin 2019-08-19 10:33:19 +08:00
joy.zhou
148738800b replace plugin 2019-08-16 18:18:19 +08:00
joy.zhou
e4e736d1e2 update readme.md 2019-08-02 10:10:27 +08:00
joy.zhou
4c5a48a44b Plugins update log (#47)
* modify

* update

* add acl

* add feature

* update dockerfile

* add deploy

* update

* update

* plugins

* plugins

* update

* update

* update

* fixed

* remove

* fixed

* add log

* update

* fixed

* update

* fix config

* add http api

* add http api

* resp

* add config for work chan

* update

* fixed

* update

* disable trace

* fixed

* change acl

* fixed

* fixed res

* dd

* dd

* ddd

* dd

* update

* fixed

* update

* add

* fixed

* update key

* add log

* update

* format

* update

* update auth

* update

* update readme

* added

* update

* fixed

* fixed

* fix

* upade

* update

* update

* update
2019-07-25 16:01:40 +08:00
joy.zhou
c6b1f1db42 Plugins support (#46)
* modify

* update

* add acl

* add feature

* update dockerfile

* add deploy

* update

* update

* plugins

* plugins

* update

* update

* update

* fixed

* remove

* fixed

* add log

* update

* fixed

* update

* fix config

* add http api

* add http api

* resp

* add config for work chan

* update

* fixed

* update

* disable trace

* fixed

* change acl

* fixed

* fixed res

* dd

* dd

* ddd

* dd

* update

* fixed

* update

* add

* fixed

* update key

* add log

* update

* format

* update

* update auth

* update

* update readme

* added

* update

* fixed

* fixed

* fix

* upade

* update

* update
2019-07-25 13:54:42 +08:00
Yuyan Zhou
daf4a0e0f5 add vendor 2019-04-24 15:45:34 +08:00
joy.zhou
c350d16ca1 add fix pool for message order (#42)
* fix pool for message order

* add go modules
2019-04-24 14:54:21 +08:00
Yuyan Zhou
edc46c1ee6 remove publish message check 2019-04-22 10:21:14 +08:00
joyz
6193be74fa Merge branch 'master' of https://github.com/fhmq/hmq 2019-01-22 22:11:59 +08:00
joyz
90beada459 some modify 2019-01-22 22:11:54 +08:00
Marc Magnin
6c7fe6a0f7 simple fix (#35) 2019-01-07 19:56:00 +08:00
joyz
2b56664d85 remove no use 2018-12-27 21:22:32 +08:00
joy.zhou
7547ad3bdc Restruct (#34)
* modify

* remove

* modify

* modify

* remove no use

* add online/offline notification

* modify

* format log

* add reference
2018-12-26 14:51:13 +08:00
45 changed files with 1925 additions and 645 deletions

3
.gitignore vendored
View File

@@ -1,4 +1,5 @@
hmq hmq
log log
log/* log/*
*.test *.test
.vscode/settings.json

11
.vscode/settings.json vendored Normal file
View File

@@ -0,0 +1,11 @@
{
"go.lintFlags": [
"--disable=all",
"--enable=errcheck,varcheck,deadcode",
"--enable=varcheck",
"--enable=deadcode"
],
"cSpell.words": [
"Authorised"
]
}

View File

@@ -1,11 +1,12 @@
FROM alpine FROM golang:1.14 as builder
COPY hmq / WORKDIR /go/src/github.com/fhmq/hmq
COPY ssl /ssl COPY . .
COPY conf /conf RUN CGO_ENABLED=0 go build -o hmq -a -ldflags '-extldflags "-static"' .
FROM alpine:3.8
WORKDIR /
COPY --from=builder /go/src/github.com/fhmq/hmq/hmq .
EXPOSE 1883 EXPOSE 1883
EXPOSE 1888
EXPOSE 8883
EXPOSE 1993
CMD ["/hmq"] CMD ["/hmq"]

View File

@@ -5,8 +5,6 @@ Free and High Performance MQTT Broker
Golang MQTT Broker, Version 3.1.1, and Compatible Golang MQTT Broker, Version 3.1.1, and Compatible
for [eclipse paho client](https://github.com/eclipse?utf8=%E2%9C%93&q=mqtt&type=&language=) and mosquitto-client for [eclipse paho client](https://github.com/eclipse?utf8=%E2%9C%93&q=mqtt&type=&language=) and mosquitto-client
Download: [click here](https://github.com/fhmq/hmq/releases)
## RUNNING ## RUNNING
```bash ```bash
$ go get github.com/fhmq/hmq $ go get github.com/fhmq/hmq
@@ -60,8 +58,10 @@ Common Options:
"certFile": "tls/server/cert.pem", "certFile": "tls/server/cert.pem",
"keyFile": "tls/server/key.pem" "keyFile": "tls/server/key.pem"
}, },
"acl":true, "plugins": {
"aclConf":"conf/acl.conf" "auth": "authhttp",
"bridge": "kafka"
}
} }
~~~ ~~~
@@ -81,7 +81,24 @@ Common Options:
* TLS/SSL Support * TLS/SSL Support
* Flexible ACL * Auth Support
* Auth Connect
* Auth ACL
* Cache Support
* Kafka Bridge Support
* Action Deliver
* Regexp Deliver
* HTTP API
* Disconnect Connect (future more)
### Share SUBSCRIBE
~~~
| Prefix | Examples | Publish |
| ------------------- |-------------------------------------------|--------------------------- --|
| $share/<group>/topic | mosquitto_sub -t $share/<group>/topic | mosquitto_pub -t topic |
~~~
### Cluster ### Cluster
```bash ```bash
@@ -92,58 +109,7 @@ Common Options:
2, config router in hmq.config ("router": "127.0.0.1:9888") 2, config router in hmq.config ("router": "127.0.0.1:9888")
``` ```
Other Version Of Cluster Based On gRPC: [click here](https://github.com/fhmq/rhmq)
### ACL Configure
#### The ACL rules define:
~~~
Allow | type | value | pubsub | Topics
~~~
#### ACL Config
~~~
## type clientid , username, ipaddr
##pub 1 , sub 2, pubsub 3
## %c is clientid , %u is username
allow ip 127.0.0.1 2 $SYS/#
allow clientid 0001 3 #
allow username admin 3 #
allow username joy 3 /test,hello/world
allow clientid * 1 toCloud/%c
allow username * 1 toCloud/%u
deny clientid * 3 #
~~~
~~~
#allow local sub $SYS topic
allow ip 127.0.0.1 2 $SYS/#
~~~
~~~
#allow client who's id with 0001 or username with admin pub sub all topic
allow clientid 0001 3 #
allow username admin 3 #
~~~
~~~
#allow client with the username joy can pub sub topic '/test' and 'hello/world'
allow username joy 3 /test,hello/world
~~~
~~~
#allow all client pub the topic toCloud/{clientid/username}
allow clientid * 1 toCloud/%c
allow username * 1 toCloud/%u
~~~
~~~
#deny all client pub sub all topic
deny clientid * 3 #
~~~
Client match acl rule one by one
~~~
--------- --------- ---------
Client -> | Rule1 | --nomatch--> | Rule2 | --nomatch--> | Rule3 | -->
--------- --------- ---------
| | |
match match match
\|/ \|/ \|/
allow | deny allow | deny allow | deny
~~~
### Online/Offline Notification ### Online/Offline Notification
```bash ```bash
@@ -169,4 +135,9 @@ Client -> | Rule1 | --nomatch--> | Rule2 | --nomatch--> | Rule3 | -->
## Reference ## Reference
* Surgermq.(https://github.com/surgemq/surgemq) * Surgermq.(https://github.com/surgemq/surgemq)
## Benchmark Tool
* https://github.com/inovex/mqtt-stresser
* https://github.com/krylovsk/mqtt-benchmark

View File

@@ -1,81 +1,40 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker package broker
import ( import (
"github.com/fhmq/hmq/lib/acl"
"github.com/fsnotify/fsnotify"
"go.uber.org/zap"
"strings" "strings"
) )
const ( const (
PUB = 1 SUB = "1"
SUB = 2 PUB = "2"
) )
func (c *client) CheckTopicAuth(typ int, topic string) bool { func (b *Broker) CheckTopicAuth(action, clientID, username, ip, topic string) bool {
if c.typ != CLIENT || !c.broker.config.Acl { if b.auth != nil {
return true if strings.HasPrefix(topic, "$SYS/broker/connection/clients/") {
} return true
if strings.HasPrefix(topic, "$queue/") {
topic = string([]byte(topic)[7:])
if topic == "" {
return false
} }
if strings.HasPrefix(topic, "$share/") && action == SUB {
substr := groupCompile.FindStringSubmatch(topic)
if len(substr) != 3 {
return false
}
topic = substr[2]
}
return b.auth.CheckACL(action, clientID, username, ip, topic)
} }
ip := c.info.remoteIP
username := string(c.info.username) return true
clientid := string(c.info.clientID)
aclInfo := c.broker.AclConfig
return acl.CheckTopicAuth(aclInfo, typ, ip, username, clientid, topic)
} }
var ( func (b *Broker) CheckConnectAuth(clientID, username, password string) bool {
watchList = []string{"./conf"} if b.auth != nil {
) return b.auth.CheckConnect(clientID, username, password)
func (b *Broker) handleFsEvent(event fsnotify.Event) error {
switch event.Name {
case b.config.AclConf:
if event.Op&fsnotify.Write == fsnotify.Write ||
event.Op&fsnotify.Create == fsnotify.Create {
log.Info("text:handling acl config change event:", zap.String("filename", event.Name))
aclconfig, err := acl.AclConfigLoad(event.Name)
if err != nil {
log.Error("aclconfig change failed, load acl conf error: ", zap.Error(err))
return err
}
b.AclConfig = aclconfig
}
} }
return nil
}
func (b *Broker) StartAclWatcher() { return true
go func() {
wch, e := fsnotify.NewWatcher()
if e != nil {
log.Error("start monitor acl config file error,", zap.Error(e))
return
}
defer wch.Close()
for _, i := range watchList {
if err := wch.Add(i); err != nil {
log.Error("start monitor acl config file error,", zap.Error(err))
return
}
}
log.Info("watching acl config file change...")
for {
select {
case evt := <-wch.Events:
b.handleFsEvent(evt)
case err := <-wch.Errors:
log.Error("error:", zap.Error(err))
}
}
}()
} }

15
broker/bridge.go Normal file
View File

@@ -0,0 +1,15 @@
package broker
import (
"github.com/fhmq/hmq/plugins/bridge"
"go.uber.org/zap"
)
func (b *Broker) Publish(e *bridge.Elements) {
if b.bridgeMQ != nil {
err := b.bridgeMQ.Publish(e)
if err != nil {
log.Error("send message to mq error.", zap.Error(err))
}
}
}

View File

@@ -1,5 +1,3 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker package broker
import ( import (
@@ -7,17 +5,18 @@ import (
"fmt" "fmt"
"net" "net"
"net/http" "net/http"
"runtime/debug"
"sync" "sync"
"sync/atomic"
"time" "time"
"github.com/fhmq/hmq/plugins/bridge"
"github.com/fhmq/hmq/plugins/auth"
"github.com/fhmq/hmq/broker/lib/sessions"
"github.com/fhmq/hmq/broker/lib/topics"
"github.com/eclipse/paho.mqtt.golang/packets" "github.com/eclipse/paho.mqtt.golang/packets"
"github.com/fhmq/hmq/lib/acl"
"github.com/fhmq/hmq/lib/sessions"
"github.com/fhmq/hmq/lib/topics"
"github.com/fhmq/hmq/pool" "github.com/fhmq/hmq/pool"
"github.com/shirou/gopsutil/mem"
"go.uber.org/zap" "go.uber.org/zap"
"golang.org/x/net/websocket" "golang.org/x/net/websocket"
) )
@@ -34,21 +33,19 @@ type Message struct {
type Broker struct { type Broker struct {
id string id string
cid uint64
mu sync.Mutex mu sync.Mutex
config *Config config *Config
tlsConfig *tls.Config tlsConfig *tls.Config
AclConfig *acl.ACLConfig
wpool *pool.WorkerPool wpool *pool.WorkerPool
clients sync.Map clients sync.Map
routes sync.Map routes sync.Map
remotes sync.Map remotes sync.Map
nodes map[string]interface{} nodes map[string]interface{}
clusterPool chan *Message clusterPool chan *Message
queues map[string]int
topicsMgr *topics.Manager topicsMgr *topics.Manager
sessionMgr *sessions.Manager sessionMgr *sessions.Manager
// messagePool []chan *Message auth auth.Auth
bridgeMQ bridge.BridgeMQ
} }
func newMessagePool() []chan *Message { func newMessagePool() []chan *Message {
@@ -61,12 +58,15 @@ func newMessagePool() []chan *Message {
} }
func NewBroker(config *Config) (*Broker, error) { func NewBroker(config *Config) (*Broker, error) {
if config == nil {
config = DefaultConfig
}
b := &Broker{ b := &Broker{
id: GenUniqueId(), id: GenUniqueId(),
config: config, config: config,
wpool: pool.New(config.Worker), wpool: pool.New(config.Worker),
nodes: make(map[string]interface{}), nodes: make(map[string]interface{}),
queues: make(map[string]int),
clusterPool: make(chan *Message), clusterPool: make(chan *Message),
} }
@@ -91,19 +91,14 @@ func NewBroker(config *Config) (*Broker, error) {
} }
b.tlsConfig = tlsconfig b.tlsConfig = tlsconfig
} }
if b.config.Acl {
aclconfig, err := acl.AclConfigLoad(b.config.AclConf) b.auth = b.config.Plugin.Auth
if err != nil { b.bridgeMQ = b.config.Plugin.Bridge
log.Error("Load acl conf error", zap.Error(err))
return nil, err
}
b.AclConfig = aclconfig
b.StartAclWatcher()
}
return b, nil return b, nil
} }
func (b *Broker) SubmitWork(msg *Message) { func (b *Broker) SubmitWork(clientId string, msg *Message) {
if b.wpool == nil { if b.wpool == nil {
b.wpool = pool.New(b.config.Worker) b.wpool = pool.New(b.config.Worker)
} }
@@ -111,7 +106,7 @@ func (b *Broker) SubmitWork(msg *Message) {
if msg.client.typ == CLUSTER { if msg.client.typ == CLUSTER {
b.clusterPool <- msg b.clusterPool <- msg
} else { } else {
b.wpool.Submit(func() { b.wpool.Submit(clientId, func() {
ProcessMessage(msg) ProcessMessage(msg)
}) })
} }
@@ -124,7 +119,11 @@ func (b *Broker) Start() {
return return
} }
//listen clinet over tcp if b.config.HTTPPort != "" {
go InitHTTPMoniter(b)
}
//listen client over tcp
if b.config.Port != "" { if b.config.Port != "" {
go b.StartClientListening(false) go b.StartClientListening(false)
} }
@@ -150,34 +149,20 @@ func (b *Broker) Start() {
b.ConnectToDiscovery() b.ConnectToDiscovery()
} }
//system monitor
go StateMonitor()
}
func StateMonitor() {
v, _ := mem.VirtualMemory()
timeSticker := time.NewTicker(time.Second * 30)
for {
select {
case <-timeSticker.C:
if v.UsedPercent > 75 {
debug.FreeOSMemory()
}
}
}
} }
func (b *Broker) StartWebsocketListening() { func (b *Broker) StartWebsocketListening() {
path := b.config.WsPath path := b.config.WsPath
hp := ":" + b.config.WsPort hp := ":" + b.config.WsPort
log.Info("Start Websocket Listener on:", zap.String("hp", hp), zap.String("path", path)) log.Info("Start Websocket Listener on:", zap.String("hp", hp), zap.String("path", path))
http.Handle(path, websocket.Handler(b.wsHandler)) ws := &websocket.Server{Handler: websocket.Handler(b.wsHandler)}
mux := http.NewServeMux()
mux.Handle(path, ws)
var err error var err error
if b.config.WsTLS { if b.config.WsTLS {
err = http.ListenAndServeTLS(hp, b.config.TlsInfo.CertFile, b.config.TlsInfo.KeyFile, nil) err = http.ListenAndServeTLS(hp, b.config.TlsInfo.CertFile, b.config.TlsInfo.KeyFile, mux)
} else { } else {
err = http.ListenAndServe(hp, nil) err = http.ListenAndServe(hp, mux)
} }
if err != nil { if err != nil {
log.Error("ListenAndServe:" + err.Error()) log.Error("ListenAndServe:" + err.Error())
@@ -187,7 +172,6 @@ func (b *Broker) StartWebsocketListening() {
func (b *Broker) wsHandler(ws *websocket.Conn) { func (b *Broker) wsHandler(ws *websocket.Conn) {
// io.Copy(ws, ws) // io.Copy(ws, ws)
atomic.AddUint64(&b.cid, 1)
ws.PayloadType = websocket.BinaryFrame ws.PayloadType = websocket.BinaryFrame
b.handleConnection(CLIENT, ws) b.handleConnection(CLIENT, ws)
} }
@@ -227,41 +211,11 @@ func (b *Broker) StartClientListening(Tls bool) {
continue continue
} }
tmpDelay = ACCEPT_MIN_SLEEP tmpDelay = ACCEPT_MIN_SLEEP
atomic.AddUint64(&b.cid, 1)
go b.handleConnection(CLIENT, conn) go b.handleConnection(CLIENT, conn)
} }
} }
func (b *Broker) Handshake(conn net.Conn) bool {
nc := tls.Server(conn, b.tlsConfig)
time.AfterFunc(DEFAULT_TLS_TIMEOUT, func() { TlsTimeout(nc) })
nc.SetReadDeadline(time.Now().Add(DEFAULT_TLS_TIMEOUT))
// Force handshake
if err := nc.Handshake(); err != nil {
log.Error("TLS handshake error, ", zap.Error(err))
return false
}
nc.SetReadDeadline(time.Time{})
return true
}
func TlsTimeout(conn *tls.Conn) {
nc := conn
// Check if already closed
if nc == nil {
return
}
cs := nc.ConnectionState()
if !cs.HandshakeComplete {
log.Error("TLS handshake timeout")
nc.Close()
}
}
func (b *Broker) StartClusterListening() { func (b *Broker) StartClusterListening() {
var hp string = b.config.Cluster.Host + ":" + b.config.Cluster.Port var hp string = b.config.Cluster.Host + ":" + b.config.Cluster.Port
log.Info("Start Listening cluster on ", zap.String("hp", hp)) log.Info("Start Listening cluster on ", zap.String("hp", hp))
@@ -311,9 +265,32 @@ func (b *Broker) handleConnection(typ int, conn net.Conn) {
log.Error("received msg that was not Connect") log.Error("received msg that was not Connect")
return return
} }
log.Info("read connect from ", zap.String("clientID", msg.ClientIdentifier))
connack := packets.NewControlPacket(packets.Connack).(*packets.ConnackPacket) connack := packets.NewControlPacket(packets.Connack).(*packets.ConnackPacket)
connack.ReturnCode = packets.Accepted
connack.SessionPresent = msg.CleanSession connack.SessionPresent = msg.CleanSession
connack.ReturnCode = msg.Validate()
if connack.ReturnCode != packets.Accepted {
err = connack.Write(conn)
if err != nil {
log.Error("send connack error, ", zap.Error(err), zap.String("clientID", msg.ClientIdentifier))
return
}
return
}
if typ == CLIENT && !b.CheckConnectAuth(string(msg.ClientIdentifier), string(msg.Username), string(msg.Password)) {
connack.ReturnCode = packets.ErrRefusedNotAuthorised
err = connack.Write(conn)
if err != nil {
log.Error("send connack error, ", zap.Error(err), zap.String("clientID", msg.ClientIdentifier))
return
}
return
}
err = connack.Write(conn) err = connack.Write(conn)
if err != nil { if err != nil {
log.Error("send connack error, ", zap.Error(err), zap.String("clientID", msg.ClientIdentifier)) log.Error("send connack error, ", zap.Error(err), zap.String("clientID", msg.ClientIdentifier))
@@ -371,6 +348,14 @@ func (b *Broker) handleConnection(typ int, conn net.Conn) {
b.clients.Store(cid, c) b.clients.Store(cid, c)
b.OnlineOfflineNotification(cid, true) b.OnlineOfflineNotification(cid, true)
{
b.Publish(&bridge.Elements{
ClientID: string(msg.ClientIdentifier),
Username: string(msg.Username),
Action: bridge.Connect,
Timestamp: time.Now().Unix(),
})
}
case ROUTER: case ROUTER:
old, exist = b.routes.Load(cid) old, exist = b.routes.Load(cid)
if exist { if exist {
@@ -383,8 +368,6 @@ func (b *Broker) handleConnection(typ int, conn net.Conn) {
b.routes.Store(cid, c) b.routes.Store(cid, c)
} }
// mpool := b.messagePool[fnv1a.HashString64(cid)%MessagePoolNum]
c.readLoop() c.readLoop()
} }
@@ -617,7 +600,9 @@ func (b *Broker) removeClient(c *client) {
func (b *Broker) PublishMessage(packet *packets.PublishPacket) { func (b *Broker) PublishMessage(packet *packets.PublishPacket) {
var subs []interface{} var subs []interface{}
var qoss []byte var qoss []byte
b.mu.Lock()
err := b.topicsMgr.Subscribers([]byte(packet.TopicName), packet.Qos, &subs, &qoss) err := b.topicsMgr.Subscribers([]byte(packet.TopicName), packet.Qos, &subs, &qoss)
b.mu.Unlock()
if err != nil { if err != nil {
log.Error("search sub client error, ", zap.Error(err)) log.Error("search sub client error, ", zap.Error(err))
return return

View File

@@ -1,19 +1,23 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker package broker
import ( import (
"context" "context"
"errors" "errors"
"github.com/eapache/queue"
"math/rand"
"net" "net"
"reflect" "reflect"
"regexp"
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/fhmq/hmq/broker/lib/sessions"
"github.com/fhmq/hmq/broker/lib/topics"
"github.com/fhmq/hmq/plugins/bridge"
"golang.org/x/net/websocket"
"github.com/eclipse/paho.mqtt.golang/packets" "github.com/eclipse/paho.mqtt.golang/packets"
"github.com/fhmq/hmq/lib/sessions"
"github.com/fhmq/hmq/lib/topics"
"go.uber.org/zap" "go.uber.org/zap"
) )
@@ -28,39 +32,68 @@ const (
REMOTE = 2 REMOTE = 2
CLUSTER = 3 CLUSTER = 3
) )
const (
_GroupTopicRegexp = `^\$share/([0-9a-zA-Z_-]+)/(.*)$`
)
const ( const (
Connected = 1 Connected = 1
Disconnected = 2 Disconnected = 2
) )
const (
awaitRelTimeout int64 = 20
retryInterval int64 = 20
)
var (
groupCompile = regexp.MustCompile(_GroupTopicRegexp)
)
type client struct { type client struct {
typ int typ int
mu sync.Mutex mu sync.Mutex
broker *Broker broker *Broker
conn net.Conn conn net.Conn
info info info info
route route route route
status int status int
ctx context.Context ctx context.Context
cancelFunc context.CancelFunc cancelFunc context.CancelFunc
session *sessions.Session session *sessions.Session
subMap map[string]*subscription subMap map[string]*subscription
topicsMgr *topics.Manager topicsMgr *topics.Manager
subs []interface{} subs []interface{}
qoss []byte qoss []byte
rmsgs []*packets.PublishPacket rmsgs []*packets.PublishPacket
routeSubMap map[string]uint64
awaitingRel map[uint16]int64
maxAwaitingRel int
inflight map[uint16]*inflightElem
mqueue *queue.Queue
retryTimer *time.Timer
retryTimerLock sync.Mutex
} }
type subInfo struct { type InflightStatus uint8
sub *subscription
num int
}
const (
Publish InflightStatus = 0
Pubrel InflightStatus = 1
)
type inflightElem struct {
status InflightStatus
packet *packets.PublishPacket
timestamp int64
}
type subscription struct { type subscription struct {
client *client client *client
topic string topic string
qos byte qos byte
queue bool share bool
groupName string
} }
type info struct { type info struct {
@@ -79,16 +112,29 @@ type route struct {
} }
var ( var (
DisconnectdPacket = packets.NewControlPacket(packets.Disconnect).(*packets.DisconnectPacket) DisconnectedPacket = packets.NewControlPacket(packets.Disconnect).(*packets.DisconnectPacket)
r = rand.New(rand.NewSource(time.Now().UnixNano()))
) )
func (c *client) init() { func (c *client) init() {
c.status = Connected c.status = Connected
c.info.localIP = strings.Split(c.conn.LocalAddr().String(), ":")[0] c.info.localIP, _, _ = net.SplitHostPort(c.conn.LocalAddr().String())
c.info.remoteIP = strings.Split(c.conn.RemoteAddr().String(), ":")[0] remoteAddr := c.conn.RemoteAddr()
remoteNetwork := remoteAddr.Network()
c.info.remoteIP = ""
if remoteNetwork != "websocket" {
c.info.remoteIP, _, _ = net.SplitHostPort(remoteAddr.String())
} else {
ws := c.conn.(*websocket.Conn)
c.info.remoteIP, _, _ = net.SplitHostPort(ws.Request().RemoteAddr)
}
c.ctx, c.cancelFunc = context.WithCancel(context.Background()) c.ctx, c.cancelFunc = context.WithCancel(context.Background())
c.subMap = make(map[string]*subscription) c.subMap = make(map[string]*subscription)
c.topicsMgr = c.broker.topicsMgr c.topicsMgr = c.broker.topicsMgr
c.routeSubMap = make(map[string]uint64)
c.awaitingRel = make(map[uint16]int64)
c.inflight = make(map[uint16]*inflightElem)
c.mqueue = queue.New()
} }
func (c *client) readLoop() { func (c *client) readLoop() {
@@ -107,16 +153,26 @@ func (c *client) readLoop() {
return return
default: default:
//add read timeout //add read timeout
if err := nc.SetReadDeadline(time.Now().Add(timeOut)); err != nil { if keepAlive > 0 {
log.Error("set read timeout error: ", zap.Error(err), zap.String("ClientID", c.info.clientID)) if err := nc.SetReadDeadline(time.Now().Add(timeOut)); err != nil {
return log.Error("set read timeout error: ", zap.Error(err), zap.String("ClientID", c.info.clientID))
msg := &Message{
client: c,
packet: DisconnectedPacket,
}
b.SubmitWork(c.info.clientID, msg)
return
}
} }
packet, err := packets.ReadPacket(nc) packet, err := packets.ReadPacket(nc)
if err != nil { if err != nil {
log.Error("read packet error: ", zap.Error(err), zap.String("ClientID", c.info.clientID)) log.Error("read packet error: ", zap.Error(err), zap.String("ClientID", c.info.clientID))
msg := &Message{client: c, packet: DisconnectdPacket} msg := &Message{
b.SubmitWork(msg) client: c,
packet: DisconnectedPacket,
}
b.SubmitWork(c.info.clientID, msg)
return return
} }
@@ -124,7 +180,7 @@ func (c *client) readLoop() {
client: c, client: c,
packet: packet, packet: packet,
} }
b.SubmitWork(msg) b.SubmitWork(c.info.clientID, msg)
} }
} }
@@ -136,7 +192,11 @@ func ProcessMessage(msg *Message) {
if ca == nil { if ca == nil {
return return
} }
log.Debug("Recv message:", zap.String("message type", reflect.TypeOf(msg.packet).String()[9:]), zap.String("ClientID", c.info.clientID))
if c.typ == CLIENT {
log.Debug("Recv message:", zap.String("message type", reflect.TypeOf(msg.packet).String()[9:]), zap.String("ClientID", c.info.clientID))
}
switch ca.(type) { switch ca.(type) {
case *packets.ConnackPacket: case *packets.ConnackPacket:
case *packets.ConnectPacket: case *packets.ConnectPacket:
@@ -144,9 +204,43 @@ func ProcessMessage(msg *Message) {
packet := ca.(*packets.PublishPacket) packet := ca.(*packets.PublishPacket)
c.ProcessPublish(packet) c.ProcessPublish(packet)
case *packets.PubackPacket: case *packets.PubackPacket:
packet := ca.(*packets.PubackPacket)
if _, found := c.inflight[packet.MessageID]; found {
delete(c.inflight, packet.MessageID)
} else {
log.Error("Duplicated PUBACK PacketId", zap.Uint16("MessageID", packet.MessageID))
}
case *packets.PubrecPacket: case *packets.PubrecPacket:
packet := ca.(*packets.PubrecPacket)
if _, found := c.inflight[packet.MessageID]; found {
if c.inflight[packet.MessageID].status == Publish {
c.inflight[packet.MessageID].status = Pubrel
c.inflight[packet.MessageID].timestamp = time.Now().Unix()
} else if c.inflight[packet.MessageID].status == Pubrel {
log.Error("Duplicated PUBREC PacketId", zap.Uint16("MessageID", packet.MessageID))
}
} else {
log.Error("The PUBREC PacketId is not found.", zap.Uint16("MessageID", packet.MessageID))
}
pubrel := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket)
pubrel.MessageID = packet.MessageID
if err := c.WriterPacket(pubrel); err != nil {
log.Error("send pubrel error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
return
}
case *packets.PubrelPacket: case *packets.PubrelPacket:
packet := ca.(*packets.PubrelPacket)
c.pubRel(packet.MessageID)
pubcomp := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket)
pubcomp.MessageID = packet.MessageID
if err := c.WriterPacket(pubcomp); err != nil {
log.Error("send pubcomp error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
return
}
case *packets.PubcompPacket: case *packets.PubcompPacket:
packet := ca.(*packets.PubcompPacket)
delete(c.inflight, packet.MessageID)
case *packets.SubscribePacket: case *packets.SubscribePacket:
packet := ca.(*packets.SubscribePacket) packet := ca.(*packets.SubscribePacket)
c.ProcessSubscribe(packet) c.ProcessSubscribe(packet)
@@ -166,18 +260,32 @@ func ProcessMessage(msg *Message) {
} }
func (c *client) ProcessPublish(packet *packets.PublishPacket) { func (c *client) ProcessPublish(packet *packets.PublishPacket) {
switch c.typ {
case CLIENT:
c.processClientPublish(packet)
case ROUTER:
c.processRouterPublish(packet)
case CLUSTER:
c.processRemotePublish(packet)
}
}
func (c *client) processRemotePublish(packet *packets.PublishPacket) {
if c.status == Disconnected { if c.status == Disconnected {
return return
} }
topic := packet.TopicName topic := packet.TopicName
if topic == BrokerInfoTopic && c.typ == CLUSTER { if topic == BrokerInfoTopic {
c.ProcessInfo(packet) c.ProcessInfo(packet)
return return
} }
if !c.CheckTopicAuth(PUB, topic) { }
log.Error("Pub Topics Auth failed, ", zap.String("topic", topic), zap.String("ClientID", c.info.clientID))
func (c *client) processRouterPublish(packet *packets.PublishPacket) {
if c.status == Disconnected {
return return
} }
@@ -201,11 +309,58 @@ func (c *client) ProcessPublish(packet *packets.PublishPacket) {
} }
func (c *client) ProcessPublishMessage(packet *packets.PublishPacket) { func (c *client) processClientPublish(packet *packets.PublishPacket) {
if c.status == Disconnected {
topic := packet.TopicName
if !c.broker.CheckTopicAuth(PUB, c.info.clientID, c.info.username, c.info.remoteIP, topic) {
log.Error("Pub Topics Auth failed, ", zap.String("topic", topic), zap.String("ClientID", c.info.clientID))
return return
} }
//publish kafka
c.broker.Publish(&bridge.Elements{
ClientID: c.info.clientID,
Username: c.info.username,
Action: bridge.Publish,
Timestamp: time.Now().Unix(),
Payload: string(packet.Payload),
Topic: topic,
})
switch packet.Qos {
case QosAtMostOnce:
c.ProcessPublishMessage(packet)
case QosAtLeastOnce:
puback := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
puback.MessageID = packet.MessageID
if err := c.WriterPacket(puback); err != nil {
log.Error("send puback error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
return
}
c.ProcessPublishMessage(packet)
case QosExactlyOnce:
if err := c.registerPublishPacketId(packet.MessageID); err != nil {
return
} else {
pubrec := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket)
pubrec.MessageID = packet.MessageID
if err := c.WriterPacket(pubrec); err != nil {
log.Error("send pubrec error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
return
}
c.ProcessPublishMessage(packet)
}
return
default:
log.Error("publish with unknown qos", zap.String("ClientID", c.info.clientID))
return
}
}
func (c *client) ProcessPublishMessage(packet *packets.PublishPacket) {
b := c.broker b := c.broker
if b == nil { if b == nil {
return return
@@ -224,12 +379,13 @@ func (c *client) ProcessPublishMessage(packet *packets.PublishPacket) {
return return
} }
// log.Info("psubs num: ", len(r.psubs)) // fmt.Println("psubs num: ", len(c.subs))
if len(c.subs) == 0 { if len(c.subs) == 0 {
return return
} }
for _, sub := range c.subs { var qsub []int
for i, sub := range c.subs {
s, ok := sub.(*subscription) s, ok := sub.(*subscription)
if ok { if ok {
if s.client.typ == ROUTER { if s.client.typ == ROUTER {
@@ -237,17 +393,36 @@ func (c *client) ProcessPublishMessage(packet *packets.PublishPacket) {
continue continue
} }
} }
err := s.client.WriterPacket(packet) if s.share {
if err != nil { qsub = append(qsub, i)
log.Error("process message for psub error, ", zap.Error(err), zap.String("ClientID", c.info.clientID)) } else {
publish(s, packet)
} }
} }
} }
if len(qsub) > 0 {
idx := r.Intn(len(qsub))
sub := c.subs[qsub[idx]].(*subscription)
publish(sub, packet)
}
} }
func (c *client) ProcessSubscribe(packet *packets.SubscribePacket) { func (c *client) ProcessSubscribe(packet *packets.SubscribePacket) {
switch c.typ {
case CLIENT:
c.processClientSubscribe(packet)
case ROUTER:
fallthrough
case REMOTE:
c.processRouterSubscribe(packet)
}
}
func (c *client) processClientSubscribe(packet *packets.SubscribePacket) {
if c.status == Disconnected { if c.status == Disconnected {
return return
} }
@@ -266,25 +441,56 @@ func (c *client) ProcessSubscribe(packet *packets.SubscribePacket) {
for i, topic := range topics { for i, topic := range topics {
t := topic t := topic
//check topic auth for client //check topic auth for client
if !c.CheckTopicAuth(SUB, topic) { if !b.CheckTopicAuth(SUB, c.info.clientID, c.info.username, c.info.remoteIP, topic) {
log.Error("Sub topic Auth failed: ", zap.String("topic", topic), zap.String("ClientID", c.info.clientID)) log.Error("Sub topic Auth failed: ", zap.String("topic", topic), zap.String("ClientID", c.info.clientID))
retcodes = append(retcodes, QosFailure) retcodes = append(retcodes, QosFailure)
continue continue
} }
b.Publish(&bridge.Elements{
ClientID: c.info.clientID,
Username: c.info.username,
Action: bridge.Subscribe,
Timestamp: time.Now().Unix(),
Topic: topic,
})
groupName := ""
share := false
if strings.HasPrefix(topic, "$share/") {
substr := groupCompile.FindStringSubmatch(topic)
if len(substr) != 3 {
retcodes = append(retcodes, QosFailure)
continue
}
share = true
groupName = substr[1]
topic = substr[2]
}
if oldSub, exist := c.subMap[t]; exist {
c.topicsMgr.Unsubscribe([]byte(oldSub.topic), oldSub)
delete(c.subMap, t)
}
sub := &subscription{ sub := &subscription{
topic: t, topic: topic,
qos: qoss[i], qos: qoss[i],
client: c, client: c,
share: share,
groupName: groupName,
} }
rqos, err := c.topicsMgr.Subscribe([]byte(topic), qoss[i], sub) rqos, err := c.topicsMgr.Subscribe([]byte(topic), qoss[i], sub)
if err != nil { if err != nil {
return log.Error("subscribe error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
retcodes = append(retcodes, QosFailure)
continue
} }
c.subMap[topic] = sub c.subMap[t] = sub
c.session.AddTopic(topic, qoss[i])
c.session.AddTopic(t, qoss[i])
retcodes = append(retcodes, rqos) retcodes = append(retcodes, rqos)
c.topicsMgr.Retained([]byte(topic), &c.rmsgs) c.topicsMgr.Retained([]byte(topic), &c.rmsgs)
@@ -298,9 +504,7 @@ func (c *client) ProcessSubscribe(packet *packets.SubscribePacket) {
return return
} }
//broadcast subscribe message //broadcast subscribe message
if c.typ == CLIENT { go b.BroadcastSubOrUnsubMessage(packet)
go b.BroadcastSubOrUnsubMessage(packet)
}
//process retain message //process retain message
for _, rm := range c.rmsgs { for _, rm := range c.rmsgs {
@@ -312,7 +516,76 @@ func (c *client) ProcessSubscribe(packet *packets.SubscribePacket) {
} }
} }
func (c *client) processRouterSubscribe(packet *packets.SubscribePacket) {
if c.status == Disconnected {
return
}
b := c.broker
if b == nil {
return
}
topics := packet.Topics
qoss := packet.Qoss
suback := packets.NewControlPacket(packets.Suback).(*packets.SubackPacket)
suback.MessageID = packet.MessageID
var retcodes []byte
for i, topic := range topics {
t := topic
groupName := ""
share := false
if strings.HasPrefix(topic, "$share/") {
substr := groupCompile.FindStringSubmatch(topic)
if len(substr) != 3 {
retcodes = append(retcodes, QosFailure)
continue
}
share = true
groupName = substr[1]
topic = substr[2]
}
sub := &subscription{
topic: topic,
qos: qoss[i],
client: c,
share: share,
groupName: groupName,
}
rqos, err := c.topicsMgr.Subscribe([]byte(topic), qoss[i], sub)
if err != nil {
log.Error("subscribe error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
retcodes = append(retcodes, QosFailure)
continue
}
c.subMap[t] = sub
addSubMap(c.routeSubMap, topic)
retcodes = append(retcodes, rqos)
}
suback.ReturnCodes = retcodes
err := c.WriterPacket(suback)
if err != nil {
log.Error("send suback error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
return
}
}
func (c *client) ProcessUnSubscribe(packet *packets.UnsubscribePacket) { func (c *client) ProcessUnSubscribe(packet *packets.UnsubscribePacket) {
switch c.typ {
case CLIENT:
c.processClientUnSubscribe(packet)
case ROUTER:
c.processRouterUnSubscribe(packet)
}
}
func (c *client) processRouterUnSubscribe(packet *packets.UnsubscribePacket) {
if c.status == Disconnected { if c.status == Disconnected {
return return
} }
@@ -323,13 +596,60 @@ func (c *client) ProcessUnSubscribe(packet *packets.UnsubscribePacket) {
topics := packet.Topics topics := packet.Topics
for _, topic := range topics { for _, topic := range topics {
t := []byte(topic)
sub, exist := c.subMap[topic] sub, exist := c.subMap[topic]
if exist { if exist {
c.topicsMgr.Unsubscribe(t, sub) retainNum := delSubMap(c.routeSubMap, topic)
if retainNum > 0 {
continue
}
c.topicsMgr.Unsubscribe([]byte(sub.topic), sub)
delete(c.subMap, topic)
}
}
unsuback := packets.NewControlPacket(packets.Unsuback).(*packets.UnsubackPacket)
unsuback.MessageID = packet.MessageID
err := c.WriterPacket(unsuback)
if err != nil {
log.Error("send unsuback error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
return
}
}
func (c *client) processClientUnSubscribe(packet *packets.UnsubscribePacket) {
if c.status == Disconnected {
return
}
b := c.broker
if b == nil {
return
}
topics := packet.Topics
for _, topic := range topics {
{
//publish kafka
b.Publish(&bridge.Elements{
ClientID: c.info.clientID,
Username: c.info.username,
Action: bridge.Unsubscribe,
Timestamp: time.Now().Unix(),
Topic: topic,
})
}
sub, exist := c.subMap[topic]
if exist {
c.topicsMgr.Unsubscribe([]byte(sub.topic), sub)
c.session.RemoveTopic(topic) c.session.RemoveTopic(topic)
delete(c.subMap, topic) delete(c.subMap, topic)
} }
} }
unsuback := packets.NewControlPacket(packets.Unsuback).(*packets.UnsubackPacket) unsuback := packets.NewControlPacket(packets.Unsuback).(*packets.UnsubackPacket)
@@ -341,9 +661,7 @@ func (c *client) ProcessUnSubscribe(packet *packets.UnsubscribePacket) {
return return
} }
// //process ubsubscribe message // //process ubsubscribe message
if c.typ == CLIENT { b.BroadcastSubOrUnsubMessage(packet)
b.BroadcastSubOrUnsubMessage(packet)
}
} }
func (c *client) ProcessPing() { func (c *client) ProcessPing() {
@@ -367,18 +685,32 @@ func (c *client) Close() {
c.status = Disconnected c.status = Disconnected
//wait for message complete //wait for message complete
time.Sleep(1 * time.Second) // time.Sleep(1 * time.Second)
// c.status = Disconnected // c.status = Disconnected
b := c.broker
b.Publish(&bridge.Elements{
ClientID: c.info.clientID,
Username: c.info.username,
Action: bridge.Disconnect,
Timestamp: time.Now().Unix(),
})
if c.conn != nil { if c.conn != nil {
c.conn.Close() c.conn.Close()
c.conn = nil c.conn = nil
} }
b := c.broker
subs := c.subMap subs := c.subMap
if b != nil { if b != nil {
b.removeClient(c) b.removeClient(c)
for _, sub := range subs {
err := b.topicsMgr.Unsubscribe([]byte(sub.topic), sub)
if err != nil {
log.Error("unsubscribe error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
}
}
if c.typ == CLIENT { if c.typ == CLIENT {
b.BroadcastUnSubscribe(subs) b.BroadcastUnSubscribe(subs)
@@ -402,6 +734,11 @@ func (c *client) Close() {
} }
func (c *client) WriterPacket(packet packets.ControlPacket) error { func (c *client) WriterPacket(packet packets.ControlPacket) error {
defer func() {
if err := recover(); err != nil {
log.Error("recover error, ", zap.Any("recover", r))
}
}()
if c.status == Disconnected { if c.status == Disconnected {
return nil return nil
} }
@@ -419,3 +756,50 @@ func (c *client) WriterPacket(packet packets.ControlPacket) error {
c.mu.Unlock() c.mu.Unlock()
return err return err
} }
func (c *client) registerPublishPacketId(packetId uint16) error {
if c.isAwaitingFull() {
log.Error("Dropped qos2 packet for too many awaiting_rel", zap.Uint16("id", packetId))
return errors.New("DROPPED_QOS2_PACKET_FOR_TOO_MANY_AWAITING_REL")
}
if _, found := c.awaitingRel[packetId]; found {
return errors.New("RC_PACKET_IDENTIFIER_IN_USE")
}
c.awaitingRel[packetId] = time.Now().Unix()
time.AfterFunc(time.Duration(awaitRelTimeout)*time.Second, c.expireAwaitingRel)
return nil
}
func (c *client) isAwaitingFull() bool {
if c.maxAwaitingRel == 0 {
return false
}
if len(c.awaitingRel) < c.maxAwaitingRel {
return false
}
return true
}
func (c *client) expireAwaitingRel() {
if len(c.awaitingRel) == 0 {
return
}
now := time.Now().Unix()
for packetId, Timestamp := range c.awaitingRel {
if now-Timestamp >= awaitRelTimeout {
log.Error("Dropped qos2 packet for await_rel_timeout", zap.Uint16("id", packetId))
delete(c.awaitingRel, packetId)
}
}
}
func (c *client) pubRel(packetId uint16) error {
if _, found := c.awaitingRel[packetId]; found {
delete(c.awaitingRel, packetId)
} else {
log.Error("The PUBREL PacketId is not found", zap.Uint16("id", packetId))
return errors.New("RC_PACKET_IDENTIFIER_NOT_FOUND")
}
return nil
}

View File

@@ -1,15 +1,15 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker package broker
import ( import (
"crypto/md5" "encoding/json"
"crypto/rand"
"encoding/base64"
"encoding/hex"
"io"
"reflect" "reflect"
"time" "time"
"github.com/tidwall/gjson"
"go.uber.org/zap"
"github.com/eclipse/paho.mqtt.golang/packets"
uuid "github.com/google/uuid"
) )
const ( const (
@@ -91,13 +91,139 @@ func equal(k1, k2 interface{}) bool {
return false return false
} }
func GenUniqueId() string { func addSubMap(m map[string]uint64, topic string) {
b := make([]byte, 48) subNum, exist := m[topic]
if _, err := io.ReadFull(rand.Reader, b); err != nil { if exist {
return "" m[topic] = subNum + 1
} else {
m[topic] = 1
} }
h := md5.New() }
h.Write([]byte(base64.URLEncoding.EncodeToString(b)))
return hex.EncodeToString(h.Sum(nil)) func delSubMap(m map[string]uint64, topic string) uint64 {
// return GetMd5String() subNum, exist := m[topic]
if exist {
if subNum > 1 {
m[topic] = subNum - 1
return subNum - 1
}
} else {
m[topic] = 0
}
return 0
}
func GenUniqueId() string {
id, err := uuid.NewRandom()
if err != nil {
log.Error("uuid.NewRandom() returned an error: " + err.Error())
}
return id.String()
}
func wrapPublishPacket(packet *packets.PublishPacket) *packets.PublishPacket {
p := packet.Copy()
wrapPayload := map[string]interface{}{
"message_id": GenUniqueId(),
"payload": string(p.Payload),
}
b, _ := json.Marshal(wrapPayload)
p.Payload = b
return p
}
func unWrapPublishPacket(packet *packets.PublishPacket) *packets.PublishPacket {
p := packet.Copy()
if gjson.GetBytes(p.Payload, "payload").Exists() {
p.Payload = []byte(gjson.GetBytes(p.Payload, "payload").String())
}
return p
}
func publish(sub *subscription, packet *packets.PublishPacket) {
// var p *packets.PublishPacket
// if sub.client.info.username != "root" {
// p = unWrapPublishPacket(packet)
// } else {
// p = wrapPublishPacket(packet)
// }
// err := sub.client.WriterPacket(p)
// if err != nil {
// log.Error("process message for psub error, ", zap.Error(err))
// }
switch packet.Qos {
case QosAtMostOnce:
err := sub.client.WriterPacket(packet)
if err != nil {
log.Error("process message for psub error, ", zap.Error(err))
}
case QosAtLeastOnce, QosExactlyOnce:
sub.client.inflight[packet.MessageID] = &inflightElem{status: Publish, packet: packet, timestamp: time.Now().Unix()}
err := sub.client.WriterPacket(packet)
if err != nil {
log.Error("process message for psub error, ", zap.Error(err))
}
sub.client.ensureRetryTimer()
default:
log.Error("publish with unknown qos", zap.String("ClientID", sub.client.info.clientID))
return
}
}
// timer for retry delivery
func (c *client) ensureRetryTimer(interval ...int64) {
if c.retryTimer != nil {
return
}
if len(interval) > 1 {
return
}
timerInterval := retryInterval
if len(interval) == 1 {
timerInterval = interval[0]
}
c.retryTimerLock.Lock()
c.retryTimer = time.AfterFunc(time.Duration(timerInterval)*time.Second, c.retryDelivery)
c.retryTimerLock.Unlock()
return
}
func (c *client) resetRetryTimer() {
if c.retryTimer == nil {
return
}
// reset timer
c.retryTimerLock.Lock()
c.retryTimer = nil
c.retryTimerLock.Unlock()
}
func (c *client) retryDelivery() {
c.resetRetryTimer()
if c.conn == nil || len(c.inflight) == 0 { //Reset timer when client offline OR inflight is empty
return
}
now := time.Now().Unix()
for _, infEle := range c.inflight {
age := now - infEle.timestamp
if age >= retryInterval {
if infEle.status == Publish {
c.WriterPacket(infEle.packet)
c.inflight[infEle.packet.MessageID].timestamp = now
} else if infEle.status == Pubrel {
pubrel := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket)
pubrel.MessageID = infEle.packet.MessageID
c.WriterPacket(pubrel)
c.inflight[infEle.packet.MessageID].timestamp = now
}
} else {
if age < 0 {
age = 0
}
c.ensureRetryTimer(retryInterval - age)
}
}
c.ensureRetryTimer()
} }

View File

@@ -1,5 +1,3 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker package broker
import ( import (
@@ -13,24 +11,36 @@ import (
"os" "os"
"github.com/fhmq/hmq/logger" "github.com/fhmq/hmq/logger"
"github.com/fhmq/hmq/plugins/auth"
"github.com/fhmq/hmq/plugins/bridge"
"go.uber.org/zap" "go.uber.org/zap"
) )
type Config struct { type Config struct {
Worker int `json:"workerNum"` Worker int `json:"workerNum"`
Host string `json:"host"` HTTPPort string `json:"httpPort"`
Port string `json:"port"` Host string `json:"host"`
Cluster RouteInfo `json:"cluster"` Port string `json:"port"`
Router string `json:"router"` Cluster RouteInfo `json:"cluster"`
TlsHost string `json:"tlsHost"` Router string `json:"router"`
TlsPort string `json:"tlsPort"` TlsHost string `json:"tlsHost"`
WsPath string `json:"wsPath"` TlsPort string `json:"tlsPort"`
WsPort string `json:"wsPort"` WsPath string `json:"wsPath"`
WsTLS bool `json:"wsTLS"` WsPort string `json:"wsPort"`
TlsInfo TLSInfo `json:"tlsInfo"` WsTLS bool `json:"wsTLS"`
Acl bool `json:"acl"` TlsInfo TLSInfo `json:"tlsInfo"`
AclConf string `json:"aclConf"` Debug bool `json:"debug"`
Debug bool `json:"-"` Plugin Plugins `json:"plugins"`
}
type Plugins struct {
Auth auth.Auth
Bridge bridge.BridgeMQ
}
type NamedPlugins struct {
Auth string
Bridge string
} }
type RouteInfo struct { type RouteInfo struct {
@@ -49,11 +59,10 @@ var DefaultConfig *Config = &Config{
Worker: 4096, Worker: 4096,
Host: "0.0.0.0", Host: "0.0.0.0",
Port: "1883", Port: "1883",
Acl: false,
} }
var ( var (
log *zap.Logger log = logger.Prod().Named("broker")
) )
func showHelp() { func showHelp() {
@@ -74,6 +83,8 @@ func ConfigureConfig(args []string) (*Config, error) {
fs.BoolVar(&help, "help", false, "Show this message.") fs.BoolVar(&help, "help", false, "Show this message.")
fs.IntVar(&config.Worker, "w", 1024, "worker num to process message, perfer (client num)/10.") fs.IntVar(&config.Worker, "w", 1024, "worker num to process message, perfer (client num)/10.")
fs.IntVar(&config.Worker, "worker", 1024, "worker num to process message, perfer (client num)/10.") fs.IntVar(&config.Worker, "worker", 1024, "worker num to process message, perfer (client num)/10.")
fs.StringVar(&config.HTTPPort, "httpport", "8080", "Port to listen on.")
fs.StringVar(&config.HTTPPort, "hp", "8080", "Port to listen on.")
fs.StringVar(&config.Port, "port", "1883", "Port to listen on.") fs.StringVar(&config.Port, "port", "1883", "Port to listen on.")
fs.StringVar(&config.Port, "p", "1883", "Port to listen on.") fs.StringVar(&config.Port, "p", "1883", "Port to listen on.")
fs.StringVar(&config.Host, "host", "0.0.0.0", "Network host to listen on") fs.StringVar(&config.Host, "host", "0.0.0.0", "Network host to listen on")
@@ -108,9 +119,6 @@ func ConfigureConfig(args []string) (*Config, error) {
} }
}) })
logger.InitLogger(config.Debug)
log = logger.Get().Named("Broker")
if configFile != "" { if configFile != "" {
tmpConfig, e := LoadConfig(configFile) tmpConfig, e := LoadConfig(configFile)
if e != nil { if e != nil {
@@ -120,6 +128,10 @@ func ConfigureConfig(args []string) (*Config, error) {
} }
} }
if config.Debug {
log = logger.Debug().Named("broker")
}
if err := config.check(); err != nil { if err := config.check(); err != nil {
return nil, err return nil, err
} }
@@ -132,7 +144,7 @@ func LoadConfig(filename string) (*Config, error) {
content, err := ioutil.ReadFile(filename) content, err := ioutil.ReadFile(filename)
if err != nil { if err != nil {
log.Error("Read config file error: ", zap.Error(err)) // log.Error("Read config file error: ", zap.Error(err))
return nil, err return nil, err
} }
// log.Info(string(content)) // log.Info(string(content))
@@ -140,13 +152,25 @@ func LoadConfig(filename string) (*Config, error) {
var config Config var config Config
err = json.Unmarshal(content, &config) err = json.Unmarshal(content, &config)
if err != nil { if err != nil {
log.Error("Unmarshal config file error: ", zap.Error(err)) // log.Error("Unmarshal config file error: ", zap.Error(err))
return nil, err return nil, err
} }
return &config, nil return &config, nil
} }
func (p *Plugins) UnmarshalJSON(b []byte) error {
var named NamedPlugins
err := json.Unmarshal(b, &named)
if err != nil {
return err
}
p.Auth = auth.NewAuth(named.Auth)
p.Bridge = bridge.NewBridgeMQ(named.Bridge)
return nil
}
func (config *Config) check() error { func (config *Config) check() error {
if config.Worker == 0 { if config.Worker == 0 {

26
broker/http.go Normal file
View File

@@ -0,0 +1,26 @@
package broker
import (
"github.com/gin-gonic/gin"
)
func InitHTTPMoniter(b *Broker) {
gin.SetMode(gin.ReleaseMode)
router := gin.Default()
router.DELETE("api/v1/connections/:clientid", func(c *gin.Context) {
clientid := c.Param("clientid")
cli, ok := b.clients.Load(clientid)
if ok {
conn, succss := cli.(*client)
if succss {
conn.Close()
}
}
resp := map[string]int{
"code": 0,
}
c.JSON(200, &resp)
})
router.Run(":" + b.config.HTTPPort)
}

View File

@@ -1,5 +1,3 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker package broker
import ( import (
@@ -48,6 +46,8 @@ func (c *client) SendConnect() {
return return
} }
m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket) m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
m.ProtocolName = "MQIsdp"
m.ProtocolVersion = 3
m.CleanSession = true m.CleanSession = true
m.ClientIdentifier = c.info.clientID m.ClientIdentifier = c.info.clientID

View File

@@ -1,17 +1,3 @@
// Copyright (c) 2014 The SurgeMQ Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sessions package sessions
import ( import (

View File

@@ -104,7 +104,7 @@ func (this *memTopics) Retain(msg *packets.PublishPacket) error {
return this.rroot.rremove([]byte(msg.TopicName)) return this.rroot.rremove([]byte(msg.TopicName))
} }
return this.rroot.rinsert([]byte(msg.TopicName), msg) return this.rroot.rinsertOrUpdate([]byte(msg.TopicName), msg)
} }
func (this *memTopics) Retained(topic []byte, msgs *[]*packets.PublishPacket) error { func (this *memTopics) Retained(topic []byte, msgs *[]*packets.PublishPacket) error {
@@ -244,6 +244,9 @@ func (this *snode) smatch(topic []byte, qos byte, subs *[]interface{}, qoss *[]b
// let's find the subscribers that match the qos and append them to the list. // let's find the subscribers that match the qos and append them to the list.
if len(topic) == 0 { if len(topic) == 0 {
this.matchQos(qos, subs, qoss) this.matchQos(qos, subs, qoss)
if mwcn, _ := this.snodes[MWC]; mwcn != nil {
mwcn.matchQos(qos, subs, qoss)
}
return nil return nil
} }
@@ -283,13 +286,11 @@ func newRNode() *rnode {
} }
} }
func (this *rnode) rinsert(topic []byte, msg *packets.PublishPacket) error { func (this *rnode) rinsertOrUpdate(topic []byte, msg *packets.PublishPacket) error {
// If there's no more topic levels, that means we are at the matching rnode. // If there's no more topic levels, that means we are at the matching rnode.
if len(topic) == 0 { if len(topic) == 0 {
// Reuse the message if possible // Reuse the message if possible
if this.msg == nil { this.msg = msg
this.msg = msg
}
return nil return nil
} }
@@ -312,7 +313,7 @@ func (this *rnode) rinsert(topic []byte, msg *packets.PublishPacket) error {
this.rnodes[level] = n this.rnodes[level] = n
} }
return n.rinsert(rem, msg) return n.rinsertOrUpdate(rem, msg)
} }
// Remove the retained message for the supplied topic // Remove the retained message for the supplied topic

View File

@@ -2,10 +2,12 @@
"workerNum": 4096, "workerNum": 4096,
"port": "1883", "port": "1883",
"host": "0.0.0.0", "host": "0.0.0.0",
"debug": true,
"cluster": { "cluster": {
"host": "0.0.0.0", "host": "0.0.0.0",
"port": "1993" "port": "1993"
}, },
"httpPort": "8080",
"router": "127.0.0.1:9888", "router": "127.0.0.1:9888",
"tlsPort": "8883", "tlsPort": "8883",
"tlsHost": "0.0.0.0", "tlsHost": "0.0.0.0",
@@ -18,6 +20,8 @@
"certFile": "ssl/server/cert.pem", "certFile": "ssl/server/cert.pem",
"keyFile": "ssl/server/key.pem" "keyFile": "ssl/server/key.pem"
}, },
"acl": false, "plugins": {
"aclConf": "conf/acl.conf" "auth": "authhttp",
"bridge": "kafka"
}
} }

37
deploy/config.yaml Normal file
View File

@@ -0,0 +1,37 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: mqtt-broker
data:
hmq.config: |
{
"workerNum": 4096,
"port": "1883",
"host": "0.0.0.0",
"plugins": ["authhttp","kafka"]
}
kafka.json: |
{
"addr": [
"127.0.0.1:9090"
],
"onConnect": "onConnect",
"onPublish": "onPublish",
"onSubscribe": "onSubscribe",
"onDisconnect": "onDisconnect",
"onUnsubscribe": "onUnsubscribe",
"deliverMap": {
"#": "publish",
"/upload/+/#": "upload"
}
}
authhttp.json: |
{
"auth": "http://127.0.0.1:9090/mqtt/auth",
"acl": "http://127.0.0.1:9090/mqtt/acl",
"super": "http://127.0.0.1:9090/mqtt/superuser"
}

44
deploy/deploy.yaml Normal file
View File

@@ -0,0 +1,44 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mqtt-broker
spec:
selector:
matchLabels:
app: mqtt-broker
replicas: 1
template:
metadata:
labels:
app: mqtt-broker
spec:
containers:
- name: mqtt-broker
image: uhub.service.ucloud.cn/uiot_core_hub/hmq:v0.1.0
ports:
- containerPort: 1883
- containerPort: 8080
volumeMounts:
- name: mqtt-broker
mountPath: /conf
subPath: hmq.config
- name: mqtt-broker
mountPath: /plugins/kafka/kafka.json
subPath: kafka.json
- name: mqtt-broker
mountPath: /plugins/authttp/http.json
subPath: kafka.json
volumes:
- name: mqtt-broker
configMap:
name: mqtt-broker
items:
- key: hmq.config
path: hmq.config
items:
- key: http.json
path: http.json
items:
- key: kafka.json
path: kafka.json

13
deploy/svc.yaml Normal file
View File

@@ -0,0 +1,13 @@
kind: Service
apiVersion: v1
metadata:
name: mqtt-broker
spec:
selector:
app: mqtt-broker
ports:
- protocol: TCP
port: 1883
targetPort: 1883
type: ClusterIP
sessionAffinity: ClientIP

28
go.mod Normal file
View File

@@ -0,0 +1,28 @@
module github.com/fhmq/hmq
go 1.12
require (
github.com/Shopify/sarama v1.23.0
github.com/bitly/go-simplejson v0.5.0
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
github.com/eapache/queue v1.1.0
github.com/eclipse/paho.mqtt.golang v1.2.0
github.com/gin-gonic/gin v1.4.0
github.com/golang/protobuf v1.3.2 // indirect
github.com/google/uuid v1.1.1
github.com/kr/pretty v0.1.0 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/pkg/errors v0.8.1 // indirect
github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e
github.com/stretchr/testify v1.3.0
github.com/tidwall/gjson v1.3.0
go.uber.org/atomic v1.4.0 // indirect
go.uber.org/multierr v1.1.0 // indirect
go.uber.org/zap v1.10.0
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 // indirect
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80
golang.org/x/sys v0.0.0-20190730183949-1393eb018365 // indirect
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
)

119
go.sum Normal file
View File

@@ -0,0 +1,119 @@
github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 h1:2T/jmrHeTezcCM58lvEQXs0UpQJCo5SoGAcg+mbSTIg=
github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/Shopify/sarama v1.23.0 h1:slvlbm7bxyp7sKQbUwha5BQdZTqurhRoI+zbKorVigQ=
github.com/Shopify/sarama v1.23.0/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs=
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0=
github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3 h1:t8FVkw33L+wilf2QiWkw0UV77qRpcH/JHPKGpKa2E8g=
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
github.com/gin-gonic/gin v1.4.0 h1:3tMoCCfM7ppqsR0ptz/wi1impNpT7/9wQtMZ8lr1mCQ=
github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM=
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41 h1:GeinFsrjWz97fAxVUEd748aV0cYL+I6k44gFJTCVvpU=
github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e h1:uO75wNGioszjmIzcY/tvdDYKRLVvzggtAmmJkn9j4GQ=
github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e/go.mod h1:tm/wZFQ8e24NYaBGIlnO2WGCAi67re4HHuOm0sftE/M=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/tidwall/gjson v1.3.0 h1:kfpsw1W3trbg4Xm6doUtqSl9+LhLB6qJ9PkltVAQZYs=
github.com/tidwall/gjson v1.3.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c h1:uOCk1iQW6Vc18bnC13MfzScl+wdKBmM9Y9kU7Z83/lw=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190730183949-1393eb018365 h1:SaXEMXhWzMJThc05vu6uh61Q245r4KaWMrsTedk0FDc=
golang.org/x/sys v0.0.0-20190730183949-1393eb018365/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v8 v8.18.2 h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ=
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI=
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
gopkg.in/jcmturner/gokrb5.v7 v7.2.3 h1:hHMV/yKPwMnJhPuPx7pH2Uw/3Qyf+thJYlisUc44010=
gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View File

@@ -1,95 +0,0 @@
package sessions
import (
"time"
log "github.com/cihub/seelog"
"github.com/go-redis/redis"
jsoniter "github.com/json-iterator/go"
)
// redisClient is the shared Redis connection used by every redisProvider
// method; it is created by InitRedisConn, which must run before any provider
// method is called.
var redisClient *redis.Client

// Compile-time check that redisProvider satisfies SessionsProvider.
var _ SessionsProvider = (*redisProvider)(nil)

const (
	// sessionName is the single Redis hash key under which all sessions live.
	sessionName = "session"
)

// redisProvider stores sessions in one Redis hash; it carries no state of
// its own — everything goes through the package-level redisClient.
type redisProvider struct {
}
// init registers the Redis-backed provider under the name "redis" in the
// sessions provider registry.
func init() {
	Register("redis", NewRedisProvider())
}
// InitRedisConn connects the package-level redisClient to the Redis server
// at url (host:port). An empty url falls back to the old hard-coded default
// "127.0.0.1:6379". The call blocks, retrying every 3 seconds, until a PING
// succeeds.
func InitRedisConn(url string) {
	// BUG FIX: the url parameter used to be ignored entirely and the
	// address was always hard-coded to 127.0.0.1:6379. Honor the caller's
	// address now; keep the old value as the default for empty input so
	// existing callers passing "" behave identically.
	if url == "" {
		url = "127.0.0.1:6379"
	}
	redisClient = redis.NewClient(&redis.Options{
		Addr:     url,
		Password: "", // no password set
		DB:       0,  // use default DB
	})
	err := redisClient.Ping().Err()
	for err != nil {
		log.Error("connect redis error: ", err, " 3s try again...")
		time.Sleep(3 * time.Second)
		err = redisClient.Ping().Err()
	}
}
// NewRedisProvider returns a fresh, stateless Redis-backed session provider.
func NewRedisProvider() *redisProvider {
	p := new(redisProvider)
	return p
}
// New creates a session with the given id, persists it in the Redis session
// hash, and returns the stored copy — read back from Redis so the caller
// sees exactly what was persisted.
func (r *redisProvider) New(id string) (*Session, error) {
	// BUG FIX: the Marshal error used to be silently discarded with `_`.
	// NOTE(review): Session.id is unexported, so jsoniter will likely
	// serialize an empty object here — confirm Session's JSON tags.
	val, err := jsoniter.Marshal(&Session{id: id})
	if err != nil {
		return nil, err
	}
	if err := redisClient.HSet(sessionName, id, val).Err(); err != nil {
		return nil, err
	}
	result, err := redisClient.HGet(sessionName, id).Bytes()
	if err != nil {
		return nil, err
	}
	sess := Session{}
	if err := jsoniter.Unmarshal(result, &sess); err != nil {
		return nil, err
	}
	return &sess, nil
}
// Get loads the session stored under id from the Redis session hash.
// A missing id surfaces as the HGet error.
func (r *redisProvider) Get(id string) (*Session, error) {
	raw, err := redisClient.HGet(sessionName, id).Bytes()
	if err != nil {
		return nil, err
	}
	var sess Session
	if err := jsoniter.Unmarshal(raw, &sess); err != nil {
		return nil, err
	}
	return &sess, nil
}
// Del removes the session stored under id; the HDel error is intentionally
// ignored (best-effort delete, and a missing id is a no-op).
func (r *redisProvider) Del(id string) {
	redisClient.HDel(sessionName, id)
}

// Save is a no-op: sessions are written to Redis immediately in New, so
// there is nothing to flush here.
func (r *redisProvider) Save(id string) error {
	return nil
}

// Count returns the number of sessions currently held in the Redis hash.
func (r *redisProvider) Count() int {
	return int(redisClient.HLen(sessionName).Val())
}

// Close drops the entire session hash from Redis, discarding all sessions.
func (r *redisProvider) Close() error {
	return redisClient.Del(sessionName).Err()
}

View File

@@ -5,17 +5,27 @@ package logger
import ( import (
"go.uber.org/zap" "go.uber.org/zap"
"go.uber.org/zap/zapcore"
) )
var ( var (
// env can be setup at build time with Go Linker. Value could be prod or whatever else for dev env // env can be setup at build time with Go Linker. Value could be prod or whatever else for dev env
instance *zap.Logger instance *zap.Logger
logCfg zap.Config logCfg zap.Config
encoderCfg = zap.NewProductionEncoderConfig()
) )
func init() {
encoderCfg.TimeKey = "timestamp"
encoderCfg.EncodeTime = zapcore.ISO8601TimeEncoder
}
// NewDevLogger return a logger for dev builds // NewDevLogger return a logger for dev builds
func NewDevLogger() (*zap.Logger, error) { func NewDevLogger() (*zap.Logger, error) {
logCfg := zap.NewDevelopmentConfig() logCfg := zap.NewProductionConfig()
logCfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
// logCfg.DisableStacktrace = true
logCfg.EncoderConfig = encoderCfg
return logCfg.Build() return logCfg.Build()
} }
@@ -24,27 +34,31 @@ func NewProdLogger() (*zap.Logger, error) {
logCfg := zap.NewProductionConfig() logCfg := zap.NewProductionConfig()
logCfg.DisableStacktrace = true logCfg.DisableStacktrace = true
logCfg.Level = zap.NewAtomicLevelAt(zap.InfoLevel) logCfg.Level = zap.NewAtomicLevelAt(zap.InfoLevel)
logCfg.EncoderConfig = encoderCfg
return logCfg.Build() return logCfg.Build()
} }
func InitLogger(debug bool) { func Prod() *zap.Logger {
var err error
var log *zap.Logger
if debug {
log, err = NewDevLogger()
} else {
log, err = NewProdLogger()
}
if err != nil {
panic("Unable to create a logger.")
}
defer log.Sync()
log.Debug("Logger initialization succeeded") l, _ := NewProdLogger()
instance = log.Named("hmq") instance = l
}
return instance
}
func Debug() *zap.Logger {
l, _ := NewDevLogger()
instance = l
return instance
}
func Get() *zap.Logger {
if instance == nil {
l, _ := NewProdLogger()
instance = l
}
// Get return a *zap.Logger instance
func Get() *zap.Logger {
return instance return instance
} }

View File

@@ -1,5 +1,6 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com> /*
*/ Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package logger package logger
import ( import (

16
main.go
View File

@@ -1,13 +1,7 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
*/
package main package main
import ( import (
"fmt" "log"
"os" "os"
"os/signal" "os/signal"
"runtime" "runtime"
@@ -19,19 +13,17 @@ func main() {
runtime.GOMAXPROCS(runtime.NumCPU()) runtime.GOMAXPROCS(runtime.NumCPU())
config, err := broker.ConfigureConfig(os.Args[1:]) config, err := broker.ConfigureConfig(os.Args[1:])
if err != nil { if err != nil {
fmt.Println("configure broker config error: ", err) log.Fatal("configure broker config error: ", err)
return
} }
b, err := broker.NewBroker(config) b, err := broker.NewBroker(config)
if err != nil { if err != nil {
fmt.Println("New Broker error: ", err) log.Fatal("New Broker error: ", err)
return
} }
b.Start() b.Start()
s := waitForSignal() s := waitForSignal()
fmt.Println("signal received, broker closed.", s) log.Println("signal received, broker closed.", s)
} }
func waitForSignal() os.Signal { func waitForSignal() os.Signal {

27
plugins/auth/auth.go Normal file
View File

@@ -0,0 +1,27 @@
package auth
import (
authfile "github.com/fhmq/hmq/plugins/auth/authfile"
"github.com/fhmq/hmq/plugins/auth/authhttp"
)
const (
	// AuthHTTP selects the HTTP-callback auth plugin.
	AuthHTTP = "authhttp"
	// AuthFile selects the file-based ACL auth plugin.
	AuthFile = "authfile"
)

// Auth is the pluggable authentication/authorization interface used by the
// broker: CheckConnect gates MQTT CONNECT packets, CheckACL gates
// publish/subscribe actions per topic.
type Auth interface {
	CheckACL(action, clientID, username, ip, topic string) bool
	CheckConnect(clientID, username, password string) bool
}
// NewAuth builds the auth plugin registered under name; any unknown name
// falls back to a permissive mock that allows everything.
func NewAuth(name string) Auth {
	if name == AuthHTTP {
		return authhttp.Init()
	}
	if name == AuthFile {
		return authfile.Init()
	}
	return &mockAuth{}
}

View File

@@ -0,0 +1,54 @@
## ACL Configure
```
Attention: the ACL type values have changed — `pub=1, sub=2` is now `sub=1, pub=2`.
```
#### The ACL rules define:
~~~
Allow | type | value | pubsub | Topics
~~~
#### ACL Config
~~~
## type clientid , username, ipaddr
##sub 1 , pub 2, pubsub 3
## %c is clientid , %u is username
allow ip 127.0.0.1 2 $SYS/#
allow clientid 0001 3 #
allow username admin 3 #
allow username joy 3 /test,hello/world
allow clientid * 1 toCloud/%c
allow username * 1 toCloud/%u
deny clientid * 3 #
~~~
~~~
#allow local sub $SYS topic
allow ip 127.0.0.1 1 $SYS/#
~~~
~~~
#allow clients whose id is 0001 or whose username is admin to pub/sub on all topics
allow clientid 0001 3 #
allow username admin 3 #
~~~
~~~
#allow the client with username joy to pub/sub on topics '/test' and 'hello/world'
allow username joy 3 /test,hello/world
~~~
~~~
#allow all clients to pub on the topic toCloud/{clientid|username}
allow clientid * 2 toCloud/%c
allow username * 2 toCloud/%u
~~~
~~~
#deny all clients pub/sub on all topics
deny clientid * 3 #
~~~
Clients are matched against the ACL rules one by one:
~~~
--------- --------- ---------
Client -> | Rule1 | --nomatch--> | Rule2 | --nomatch--> | Rule3 | -->
--------- --------- ---------
| | |
match match match
\|/ \|/ \|/
allow | deny allow | deny allow | deny
~~~

View File

@@ -1,4 +1,4 @@
## pub 1 , sub 2, pubsub 3 ## sub 1 , pub 2, pubsub 3
## %c is clientid , %s is username ## %c is clientid , %s is username
##auth type value pub/sub topic ##auth type value pub/sub topic
allow ip 127.0.0.1 2 $SYS/# allow ip 127.0.0.1 2 $SYS/#
@@ -9,4 +9,4 @@ allow clientid * 1 toCloud/%c
allow username * 1 toCloud/%u allow username * 1 toCloud/%u
allow clientid * 2 toDevice/%c allow clientid * 2 toDevice/%c
allow username * 2 toDevice/%u allow username * 2 toDevice/%u
deny clientid * 3 # deny clientid * 3 #

View File

@@ -0,0 +1,23 @@
package acl
// aclAuth implements the broker's Auth interface on top of a parsed
// file-based ACL configuration.
type aclAuth struct {
	config *ACLConfig
}
// Init loads the ACL rules from the default config path and returns a
// ready-to-use file-based auth plugin. It panics when the file cannot be
// loaded, since the broker cannot run with a broken ACL config.
func Init() *aclAuth {
	cfg, err := AclConfigLoad("./plugins/auth/authfile/acl.conf")
	if err != nil {
		panic(err)
	}
	a := &aclAuth{config: cfg}
	return a
}
// CheckConnect always accepts the connection: the authfile plugin only
// enforces topic-level ACLs, not credentials.
func (a *aclAuth) CheckConnect(clientID, username, password string) bool {
	return true
}

// CheckACL checks the pub/sub action for this client/ip/topic against the
// loaded ACL rules, first match wins.
func (a *aclAuth) CheckACL(action, clientID, username, ip, topic string) bool {
	return checkTopicAuth(a.config, action, ip, username, clientID, topic)
}

View File

@@ -0,0 +1,23 @@
//+build test
package acl
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
)
// TestOrigAcls verifies that the stock acl.conf grants publish — but not
// subscribe — on $SYS topics to connections from 127.0.0.1
// (rule: allow ip 127.0.0.1 2 $SYS/#).
func TestOrigAcls(t *testing.T) {
	cwd, _ := os.Getwd()
	os.Chdir("../../../")
	a := Init()
	os.Chdir(cwd)

	assert.True(t, a.CheckACL(PUB, "dummyClientID", "dummyUser", "127.0.0.1", "$SYS/something"))
	assert.False(t, a.CheckACL(SUB, "dummyClientID", "dummyUser", "127.0.0.1", "$SYS/something"))
}

View File

@@ -1,22 +1,21 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>*/
package acl package acl
import "strings" import "strings"
func CheckTopicAuth(ACLInfo *ACLConfig, typ int, ip, username, clientid, topic string) bool { func checkTopicAuth(ACLInfo *ACLConfig, action, ip, username, clientid, topic string) bool {
for _, info := range ACLInfo.Info { for _, info := range ACLInfo.Info {
ctyp := info.Typ ctyp := info.Typ
switch ctyp { switch ctyp {
case CLIENTID: case CLIENTID:
if match, auth := info.checkWithClientID(typ, clientid, topic); match { if match, auth := info.checkWithClientID(action, clientid, topic); match {
return auth return auth
} }
case USERNAME: case USERNAME:
if match, auth := info.checkWithUsername(typ, username, topic); match { if match, auth := info.checkWithUsername(action, username, topic); match {
return auth return auth
} }
case IP: case IP:
if match, auth := info.checkWithIP(typ, ip, topic); match { if match, auth := info.checkWithIP(action, ip, topic); match {
return auth return auth
} }
} }
@@ -24,18 +23,18 @@ func CheckTopicAuth(ACLInfo *ACLConfig, typ int, ip, username, clientid, topic s
return false return false
} }
func (a *AuthInfo) checkWithClientID(typ int, clientid, topic string) (bool, bool) { func (a *AuthInfo) checkWithClientID(action, clientid, topic string) (bool, bool) {
auth := false auth := false
match := false match := false
if a.Val == "*" || a.Val == clientid { if a.Val == "*" || a.Val == clientid {
for _, tp := range a.Topics { for _, tp := range a.Topics {
des := strings.Replace(tp, "%c", clientid, -1) des := strings.Replace(tp, "%c", clientid, -1)
if typ == PUB { if action == PUB {
if pubTopicMatch(topic, des) { if pubTopicMatch(topic, des) {
match = true match = true
auth = a.checkAuth(PUB) auth = a.checkAuth(PUB)
} }
} else if typ == SUB { } else if action == SUB {
if subTopicMatch(topic, des) { if subTopicMatch(topic, des) {
match = true match = true
auth = a.checkAuth(SUB) auth = a.checkAuth(SUB)
@@ -46,18 +45,18 @@ func (a *AuthInfo) checkWithClientID(typ int, clientid, topic string) (bool, boo
return match, auth return match, auth
} }
func (a *AuthInfo) checkWithUsername(typ int, username, topic string) (bool, bool) { func (a *AuthInfo) checkWithUsername(action, username, topic string) (bool, bool) {
auth := false auth := false
match := false match := false
if a.Val == "*" || a.Val == username { if a.Val == "*" || a.Val == username {
for _, tp := range a.Topics { for _, tp := range a.Topics {
des := strings.Replace(tp, "%u", username, -1) des := strings.Replace(tp, "%u", username, -1)
if typ == PUB { if action == PUB {
if pubTopicMatch(topic, des) { if pubTopicMatch(topic, des) {
match = true match = true
auth = a.checkAuth(PUB) auth = a.checkAuth(PUB)
} }
} else if typ == SUB { } else if action == SUB {
if subTopicMatch(topic, des) { if subTopicMatch(topic, des) {
match = true match = true
auth = a.checkAuth(SUB) auth = a.checkAuth(SUB)
@@ -68,18 +67,18 @@ func (a *AuthInfo) checkWithUsername(typ int, username, topic string) (bool, boo
return match, auth return match, auth
} }
func (a *AuthInfo) checkWithIP(typ int, ip, topic string) (bool, bool) { func (a *AuthInfo) checkWithIP(action, ip, topic string) (bool, bool) {
auth := false auth := false
match := false match := false
if a.Val == "*" || a.Val == ip { if a.Val == "*" || a.Val == ip {
for _, tp := range a.Topics { for _, tp := range a.Topics {
des := tp des := tp
if typ == PUB { if action == PUB {
if pubTopicMatch(topic, des) { if pubTopicMatch(topic, des) {
auth = a.checkAuth(PUB) auth = a.checkAuth(PUB)
match = true match = true
} }
} else if typ == SUB { } else if action == SUB {
if subTopicMatch(topic, des) { if subTopicMatch(topic, des) {
auth = a.checkAuth(SUB) auth = a.checkAuth(SUB)
match = true match = true
@@ -90,15 +89,15 @@ func (a *AuthInfo) checkWithIP(typ int, ip, topic string) (bool, bool) {
return match, auth return match, auth
} }
func (a *AuthInfo) checkAuth(typ int) bool { func (a *AuthInfo) checkAuth(action string) bool {
auth := false auth := false
if typ == PUB { if action == PUB {
if a.Auth == ALLOW && (a.PubSub == PUB || a.PubSub == PUBSUB) { if a.Auth == ALLOW && (a.PubSub == PUB || a.PubSub == PUBSUB) {
auth = true auth = true
} else if a.Auth == DENY && a.PubSub == SUB { } else if a.Auth == DENY && a.PubSub == SUB {
auth = true auth = true
} }
} else if typ == SUB { } else if action == SUB {
if a.Auth == ALLOW && (a.PubSub == SUB || a.PubSub == PUBSUB) { if a.Auth == ALLOW && (a.PubSub == SUB || a.PubSub == PUBSUB) {
auth = true auth = true
} else if a.Auth == DENY && a.PubSub == PUB { } else if a.Auth == DENY && a.PubSub == PUB {

View File

@@ -1,5 +1,3 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package acl package acl
import ( import (
@@ -7,14 +5,13 @@ import (
"errors" "errors"
"io" "io"
"os" "os"
"strconv"
"strings" "strings"
) )
const ( const (
PUB = 1 SUB = "1"
SUB = 2 PUB = "2"
PUBSUB = 3 PUBSUB = "3"
CLIENTID = "clientid" CLIENTID = "clientid"
USERNAME = "username" USERNAME = "username"
IP = "ip" IP = "ip"
@@ -26,7 +23,7 @@ type AuthInfo struct {
Auth string Auth string
Typ string Typ string
Val string Val string
PubSub int PubSub string
Topics []string Topics []string
} }
@@ -36,9 +33,6 @@ type ACLConfig struct {
} }
func AclConfigLoad(file string) (*ACLConfig, error) { func AclConfigLoad(file string) (*ACLConfig, error) {
if file == "" {
file = "./conf/acl.conf"
}
aclconifg := &ACLConfig{ aclconifg := &ACLConfig{
File: file, File: file,
Info: make([]*AuthInfo, 0, 4), Info: make([]*AuthInfo, 0, 4),
@@ -81,12 +75,16 @@ func (c *ACLConfig) Prase() error {
parseErr = errors.New("\"" + line + "\" format is error") parseErr = errors.New("\"" + line + "\" format is error")
break break
} }
var pubsub int if tmpArr[3] != PUB && tmpArr[3] != SUB && tmpArr[3] != PUBSUB {
pubsub, err = strconv.Atoi(tmpArr[3])
if err != nil {
parseErr = errors.New("\"" + line + "\" format is error") parseErr = errors.New("\"" + line + "\" format is error")
break break
} }
// var pubsub int
// pubsub, err = strconv.Atoi(tmpArr[3])
// if err != nil {
// parseErr = errors.New("\"" + line + "\" format is error")
// break
// }
topicStr := strings.Replace(tmpArr[4], " ", "", -1) topicStr := strings.Replace(tmpArr[4], " ", "", -1)
topicStr = strings.Replace(topicStr, "\n", "", -1) topicStr = strings.Replace(topicStr, "\n", "", -1)
topics := strings.Split(topicStr, ",") topics := strings.Split(topicStr, ",")
@@ -95,7 +93,7 @@ func (c *ACLConfig) Prase() error {
Typ: tmpArr[1], Typ: tmpArr[1],
Val: tmpArr[2], Val: tmpArr[2],
Topics: topics, Topics: topics,
PubSub: pubsub, PubSub: tmpArr[3],
} }
c.Info = append(c.Info, tmpAuth) c.Info = append(c.Info, tmpAuth)
if err != nil { if err != nil {

View File

@@ -1,5 +1,3 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package acl package acl
import ( import (

View File

@@ -0,0 +1,179 @@
package authhttp
import (
"encoding/json"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/fhmq/hmq/logger"
"go.uber.org/zap"
)
// Config holds the three auth-service endpoints this plugin calls.
// (The original comment said "kafka config" — a copy-paste leftover.)
type Config struct {
	AuthURL  string `json:"auth"`
	ACLURL   string `json:"acl"`
	SuperURL string `json:"super"`
}

// authHTTP delegates connect and ACL decisions to an external HTTP service.
type authHTTP struct {
	client *http.Client
}

var (
	config     Config                           // loaded from http.json by Init
	log        = logger.Get().Named("authhttp") // package logger
	httpClient *http.Client                     // shared client built in Init
)
// Init reads ./plugins/auth/authhttp/http.json, builds the shared HTTP
// client, and returns the plugin. Config errors are fatal: the broker
// cannot authenticate without a valid config.
func Init() *authHTTP {
	content, err := ioutil.ReadFile("./plugins/auth/authhttp/http.json")
	if err != nil {
		log.Fatal("Read config file error: ", zap.Error(err))
	}
	if err = json.Unmarshal(content, &config); err != nil {
		log.Fatal("Unmarshal config file error: ", zap.Error(err))
	}
	transport := &http.Transport{
		MaxConnsPerHost:     100,
		MaxIdleConns:        100,
		MaxIdleConnsPerHost: 100,
	}
	httpClient = &http.Client{
		Transport: transport,
		Timeout:   time.Second * 100,
	}
	return &authHTTP{client: httpClient}
}
// CheckConnect authorizes an MQTT CONNECT by POSTing the credentials as a
// form to the configured auth endpoint. A matching in-memory cache entry
// (keyed by username) short-circuits the HTTP round trip. Returns true only
// on HTTP 200; network and request-build errors are logged and treated as
// denial.
func (a *authHTTP) CheckConnect(clientID, username, password string) bool {
	action := "connect"

	// Fast path: a previously successful connect for this username.
	if aCache := checkCache(action, clientID, username, password, ""); aCache != nil {
		if aCache.password == password && aCache.username == username && aCache.action == action {
			return true
		}
	}

	data := url.Values{}
	data.Add("username", username)
	data.Add("clientid", clientID)
	data.Add("password", password)

	// IMPROVEMENT: encode the form once — the body and the Content-Length
	// header each used to call data.Encode() separately.
	// NOTE(review): net/http ignores a Content-Length set via Header.Add
	// and uses req.ContentLength instead — the header is kept for
	// compatibility but is presumably redundant; confirm.
	body := data.Encode()
	req, err := http.NewRequest("POST", config.AuthURL, strings.NewReader(body))
	if err != nil {
		log.Error("new request super: ", zap.Error(err))
		return false
	}
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	req.Header.Add("Content-Length", strconv.Itoa(len(body)))

	resp, err := a.client.Do(req)
	if err != nil {
		log.Error("request super: ", zap.Error(err))
		return false
	}
	defer resp.Body.Close()
	// Drain the body so the transport can reuse the connection.
	io.Copy(ioutil.Discard, resp.Body)

	if resp.StatusCode == http.StatusOK {
		addCache(action, clientID, username, password, "")
		return true
	}
	return false
}
// //CheckSuper check mqtt connect
// func CheckSuper(clientID, username, password string) bool {
// action := "connect"
// {
// aCache := checkCache(action, clientID, username, password, "")
// if aCache != nil {
// if aCache.password == password && aCache.username == username && aCache.action == action {
// return true
// }
// }
// }
// data := url.Values{}
// data.Add("username", username)
// data.Add("clientid", clientID)
// data.Add("password", password)
// req, err := http.NewRequest("POST", config.SuperURL, strings.NewReader(data.Encode()))
// if err != nil {
// log.Error("new request super: ", zap.Error(err))
// return false
// }
// req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
// req.Header.Add("Content-Length", strconv.Itoa(len(data.Encode())))
// resp, err := httpClient.Do(req)
// if err != nil {
// log.Error("request super: ", zap.Error(err))
// return false
// }
// defer resp.Body.Close()
// io.Copy(ioutil.Discard, resp.Body)
// if resp.StatusCode == http.StatusOK {
// return true
// }
// return false
// }
// CheckACL authorizes a pub/sub action by querying the configured ACL
// endpoint with username, topic and access as URL query parameters. A
// matching cache entry for the same action+topic skips the HTTP call.
// Returns true only on HTTP 200; errors are logged and treated as denial.
func (a *authHTTP) CheckACL(action, clientID, username, ip, topic string) bool {
	if aCache := checkCache(action, "", username, "", topic); aCache != nil {
		if aCache.topic == topic && aCache.action == action {
			return true
		}
	}

	req, err := http.NewRequest("GET", config.ACLURL, nil)
	if err != nil {
		log.Error("get acl: ", zap.Error(err))
		return false
	}
	query := req.URL.Query()
	query.Add("username", username)
	query.Add("topic", topic)
	query.Add("access", action)
	req.URL.RawQuery = query.Encode()

	resp, err := a.client.Do(req)
	if err != nil {
		log.Error("request acl: ", zap.Error(err))
		return false
	}
	defer resp.Body.Close()
	// Drain the body so the transport can reuse the connection.
	io.Copy(ioutil.Discard, resp.Body)

	if resp.StatusCode != http.StatusOK {
		return false
	}
	addCache(action, "", username, "", topic)
	return true
}

View File

@@ -0,0 +1,32 @@
package authhttp
import (
"time"
"github.com/patrickmn/go-cache"
)
// authCache is one cached authorization decision; which fields are
// meaningful depends on the action ("connect" uses password, ACL checks use
// topic). Entries are only ever written after a successful HTTP check.
type authCache struct {
	action   string
	username string
	clientID string
	password string
	topic    string
}

var (
	// c caches decisions per username for 5 minutes, with stale entries
	// purged every 10 minutes.
	c = cache.New(5*time.Minute, 10*time.Minute)
)
// checkCache returns the cached entry for username, or nil when absent.
// Only username is used as the key; the other parameters mirror addCache's
// signature, and callers compare the returned fields themselves.
func checkCache(action, clientID, username, password, topic string) *authCache {
	entry, ok := c.Get(username)
	if !ok {
		return nil
	}
	return entry.(*authCache)
}
// addCache stores a successful authorization under the username key,
// overwriting any previous entry, with the default 5-minute expiration.
func addCache(action, clientID, username, password, topic string) {
	c.Set(username, &authCache{action: action, username: username, clientID: clientID, password: password, topic: topic}, cache.DefaultExpiration)
}

View File

@@ -0,0 +1,5 @@
{
"auth": "http://127.0.0.1:9090/mqtt/auth",
"acl": "http://127.0.0.1:9090/mqtt/acl",
"super": "http://127.0.0.1:9090/mqtt/superuser"
}

11
plugins/auth/mock.go Normal file
View File

@@ -0,0 +1,11 @@
package auth
// mockAuth is the fallback Auth implementation used when no auth plugin is
// configured: it allows every connection and every pub/sub action.
type mockAuth struct{}

// CheckACL always allows the action.
func (m *mockAuth) CheckACL(action, clientID, username, ip, topic string) bool {
	return true
}

// CheckConnect always allows the connection.
func (m *mockAuth) CheckConnect(clientID, username, password string) bool {
	return true
}

49
plugins/bridge/bridge.go Normal file
View File

@@ -0,0 +1,49 @@
package bridge
import "github.com/fhmq/hmq/logger"
const (
	//Connect mqtt connect
	Connect = "connect"
	//Publish mqtt publish
	Publish = "publish"
	//Subscribe mqtt sub
	Subscribe = "subscribe"
	//Unsubscribe mqtt unsub
	Unsubscribe = "unsubscribe"
	//Disconnect mqtt disconnect
	Disconnect = "disconnect"
)

var (
	// log is the package logger for all bridge implementations.
	log = logger.Get().Named("bridge")
)

// Elements is the event payload forwarded to a bridge for every broker
// action (connect, publish, subscribe, unsubscribe, disconnect).
type Elements struct {
	ClientID  string `json:"clientid"`
	Username  string `json:"username"`
	Topic     string `json:"topic"`
	Payload   string `json:"payload"`
	Timestamp int64  `json:"ts"`
	Size      int32  `json:"size"`
	Action    string `json:"action"`
}

const (
	//Kafka plugin name
	Kafka = "kafka"
)

// BridgeMQ is the pluggable message-bridge interface: Publish forwards one
// broker event to the external system.
type BridgeMQ interface {
	Publish(e *Elements) error
}
// NewBridgeMQ builds the bridge plugin registered under name; any unknown
// name falls back to a no-op mock bridge.
func NewBridgeMQ(name string) BridgeMQ {
	if name == Kafka {
		return InitKafka()
	}
	return &mockMQ{}
}

156
plugins/bridge/kafka.go Normal file
View File

@@ -0,0 +1,156 @@
package bridge
import (
"encoding/json"
"errors"
"io/ioutil"
"strings"
"time"
"github.com/Shopify/sarama"
"go.uber.org/zap"
)
// kafakConfig maps broker actions to Kafka topic names, plus DeliverMap,
// which routes published MQTT topics (pattern -> Kafka topic) with MQTT
// wildcard matching. (Name keeps the original "kafak" typo: it is
// referenced elsewhere in this file.)
type kafakConfig struct {
	Addr             []string          `json:"addr"`
	ConnectTopic     string            `json:"onConnect"`
	SubscribeTopic   string            `json:"onSubscribe"`
	PublishTopic     string            `json:"onPublish"`
	UnsubscribeTopic string            `json:"onUnsubscribe"`
	DisconnectTopic  string            `json:"onDisconnect"`
	DeliverMap       map[string]string `json:"deliverMap"`
}

// kafka bridges broker events to Kafka via an async Sarama producer.
type kafka struct {
	kafakConfig kafakConfig
	kafkaClient sarama.AsyncProducer
}
// InitKafka loads the Kafka bridge config, connects the async producer, and
// returns the bridge. Config or connection errors are fatal.
// NOTE(review): the config path reads plugins/kafka/ while kafka.json
// appears to ship under plugins/bridge/ — confirm which path is deployed.
func InitKafka() *kafka {
	log.Info("start connect kafka....")
	content, err := ioutil.ReadFile("./plugins/kafka/kafka.json")
	if err != nil {
		log.Fatal("Read config file error: ", zap.Error(err))
	}
	var conf kafakConfig
	if err = json.Unmarshal(content, &conf); err != nil {
		log.Fatal("Unmarshal config file error: ", zap.Error(err))
	}
	k := &kafka{kafakConfig: conf}
	k.connect()
	return k
}
// connect creates the async producer for the configured brokers and starts
// a goroutine that logs every delivery error for the producer's lifetime.
// A connection failure is fatal.
func (k *kafka) connect() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V1_1_1_0
	producer, err := sarama.NewAsyncProducer(k.kafakConfig.Addr, cfg)
	if err != nil {
		log.Fatal("create kafka async producer failed: ", zap.Error(err))
	}
	// Drain the producer's error channel so async sends never block;
	// the goroutine exits when the producer is closed.
	go func() {
		for e := range producer.Errors() {
			log.Error("send msg to kafka failed: ", zap.Error(e))
		}
	}()
	k.kafkaClient = producer
}
// Publish routes the event e to the Kafka topics configured for its action;
// Publish events additionally fan out to every deliverMap entry whose
// MQTT-style pattern matches the event topic. Events are keyed by client ID
// so per-client ordering is preserved. Unknown actions return an error.
func (k *kafka) Publish(e *Elements) error {
	cfg := k.kafakConfig
	targets := make(map[string]bool)

	// addTarget records a destination topic, skipping unconfigured ("") ones.
	addTarget := func(t string) {
		if t != "" {
			targets[t] = true
		}
	}

	switch e.Action {
	case Connect:
		addTarget(cfg.ConnectTopic)
	case Publish:
		addTarget(cfg.PublishTopic)
		// Fan out to every matching deliverMap pattern as well.
		for pattern, topic := range cfg.DeliverMap {
			if matchTopic(pattern, e.Topic) {
				targets[topic] = true
			}
		}
	case Subscribe:
		addTarget(cfg.SubscribeTopic)
	case Unsubscribe:
		addTarget(cfg.UnsubscribeTopic)
	case Disconnect:
		addTarget(cfg.DisconnectTopic)
	default:
		return errors.New("error action: " + e.Action)
	}

	return k.publish(targets, e.ClientID, e)
}
// publish JSON-encodes msg once and enqueues it on the async producer for
// every topic in topics, all under the same partitioning key. Each enqueue
// waits at most 5 seconds; a full producer queue yields a timeout error.
func (k *kafka) publish(topics map[string]bool, key string, msg *Elements) error {
	payload, err := json.Marshal(msg)
	if err != nil {
		return err
	}
	// Idiom fix: was `for topic, _ := range` with a redundant `continue`
	// in the send branch; the blank identifier and continue added nothing.
	for topic := range topics {
		select {
		case k.kafkaClient.Input() <- &sarama.ProducerMessage{
			Topic: topic,
			Key:   sarama.ByteEncoder(key),
			Value: sarama.ByteEncoder(payload),
		}:
		case <-time.After(5 * time.Second):
			return errors.New("write kafka timeout")
		}
	}
	return nil
}
// match reports whether the MQTT subscription pattern subTopic (already
// split on "/"; "+" matches exactly one segment, "#" matches the rest)
// matches the concrete topic segments.
func match(subTopic []string, topic []string) bool {
	switch {
	case len(subTopic) == 0:
		// Pattern exhausted: only an exhausted topic matches.
		return len(topic) == 0
	case subTopic[0] == "#":
		// "#" swallows everything that remains, including nothing.
		return true
	case len(topic) == 0:
		// Topic exhausted but pattern still has non-"#" segments.
		return false
	case subTopic[0] == "+", subTopic[0] == topic[0]:
		// Single-segment wildcard or literal match: recurse on the rest.
		return match(subTopic[1:], topic[1:])
	default:
		return false
	}
}
// matchTopic reports whether the "/"-separated MQTT pattern subTopic
// (supporting "+" and "#" wildcards) matches the concrete topic string.
func matchTopic(subTopic string, topic string) bool {
	return match(strings.Split(subTopic, "/"), strings.Split(topic, "/"))
}

View File

@@ -0,0 +1,14 @@
{
"addr": [
"127.0.0.1:9090"
],
"onConnect": "onConnect",
"onPublish": "onPublish",
"onSubscribe": "onSubscribe",
"onDisconnect": "onDisconnect",
"onUnsubscribe": "onUnsubscribe",
"deliverMap": {
"#": "publish",
"/upload/+/#": "upload"
}
}

7
plugins/bridge/mock.go Normal file
View File

@@ -0,0 +1,7 @@
package bridge
// mockMQ is the no-op bridge used when no bridge plugin is configured.
type mockMQ struct{}

// Publish discards the event and reports success.
func (m *mockMQ) Publish(e *Elements) error {
	return nil
}

58
pool/fixpool.go Normal file
View File

@@ -0,0 +1,58 @@
package pool
import (
"github.com/segmentio/fasthash/fnv1a"
)
// WorkerPool fans tasks out to a fixed set of workers, one queue per
// worker. Tasks submitted with the same uid always land on the same queue,
// preserving per-uid execution order.
type WorkerPool struct {
	maxWorkers  int
	taskQueue   []chan func()
	stoppedChan chan struct{} // NOTE(review): created but never closed or read here — confirm it is still needed
}
// New creates a pool with maxWorkers workers (minimum 1) and starts them
// immediately; the pool is ready for Submit on return.
func New(maxWorkers int) *WorkerPool {
	// There must be at least one worker.
	if maxWorkers < 1 {
		maxWorkers = 1
	}
	// Each worker gets its own buffered queue; dispatch fills in the
	// channels. (The previous "taskQueue is unbuffered" comment was stale —
	// dispatch allocates 1024-slot buffered channels.)
	pool := &WorkerPool{
		taskQueue:   make([]chan func(), maxWorkers),
		maxWorkers:  maxWorkers,
		stoppedChan: make(chan struct{}),
	}
	// Start the task dispatcher.
	pool.dispatch()
	return pool
}
// Submit enqueues task on the worker selected by hashing uid, so equal uids
// always share a worker. Nil tasks are dropped. Blocks when that worker's
// queue is full.
func (p *WorkerPool) Submit(uid string, task func()) {
	if task == nil {
		return
	}
	slot := fnv1a.HashString64(uid) % uint64(p.maxWorkers)
	p.taskQueue[slot] <- task
}
// dispatch allocates one buffered queue per worker slot and launches a
// worker goroutine draining each.
func (p *WorkerPool) dispatch() {
	for i := range p.taskQueue {
		ch := make(chan func(), 1024)
		p.taskQueue[i] = ch
		go startWorker(ch)
	}
}
// startWorker drains taskChan, running each task in order, until the
// channel is closed. It runs in the caller's goroutine: dispatch already
// invokes it with `go`, so the previous inner `go func(){...}()` wrapper
// spawned a second, redundant goroutine per worker whose outer shell exited
// immediately — removed, and the manual receive loop replaced with the
// idiomatic range-over-channel.
func startWorker(taskChan chan func()) {
	for task := range taskChan {
		// Execute the task.
		task()
	}
}

View File

@@ -1,166 +1,166 @@
package pool package pool
import "time" // import "time"
const ( // const (
// This value is the size of the queue that workers register their // // This value is the size of the queue that workers register their
// availability to the dispatcher. There may be hundreds of workers, but // // availability to the dispatcher. There may be hundreds of workers, but
// only a small channel is needed to register some of the workers. // // only a small channel is needed to register some of the workers.
readyQueueSize = 16 // readyQueueSize = 64
// If worker pool receives no new work for this period of time, then stop // // If worker pool receives no new work for this period of time, then stop
// a worker goroutine. // // a worker goroutine.
idleTimeoutSec = 5 // idleTimeoutSec = 5
) // )
type WorkerPool struct { // type WorkerPool struct {
maxWorkers int // maxWorkers int
timeout time.Duration // timeout time.Duration
taskQueue chan func() // taskQueue chan func()
readyWorkers chan chan func() // readyWorkers chan chan func()
stoppedChan chan struct{} // stoppedChan chan struct{}
} // }
// New creates and starts a pool of worker goroutines.
//
// maxWorkers is the maximum number of workers that may execute tasks
// concurrently; values below 1 are treated as 1, so the pool always has at
// least one worker. New starts the dispatcher goroutine, so the caller must
// eventually call Stop to release it.
func New(maxWorkers int) *WorkerPool {
	// There must be at least one worker.
	if maxWorkers < 1 {
		maxWorkers = 1
	}

	// taskQueue is unbuffered since items are always removed immediately.
	pool := &WorkerPool{
		taskQueue:    make(chan func()),
		maxWorkers:   maxWorkers,
		readyWorkers: make(chan chan func(), readyQueueSize),
		timeout:      time.Second * idleTimeoutSec,
		stoppedChan:  make(chan struct{}),
	}

	// Start the task dispatcher.
	go pool.dispatch()

	return pool
}
// Stop stops the worker pool and waits for the workers and the dispatcher to
// finish. No new tasks may be submitted after Stop is called. Calling Stop on
// an already-stopped pool is a no-op.
func (p *WorkerPool) Stop() {
	if p.Stopped() {
		return
	}
	// Closing the task queue tells the dispatcher to wind down; it closes
	// stoppedChan once every remaining worker has been stopped.
	close(p.taskQueue)
	<-p.stoppedChan
}
// Stopped reports whether this worker pool has completed shutdown, i.e. the
// dispatcher has exited and closed stoppedChan.
func (p *WorkerPool) Stopped() bool {
	// A closed channel is always ready to receive; an open one makes the
	// select fall through to default.
	select {
	case <-p.stoppedChan:
		return true
	default:
	}
	return false
}
// Submit enqueues a function for a worker to execute. It hands the task to
// the dispatcher and returns without waiting for the task to run. A nil task
// is ignored.
func (p *WorkerPool) Submit(task func()) {
	if task != nil {
		p.taskQueue <- task
	}
}
// SubmitWait enqueues the given function and blocks until a worker has
// finished executing it. A nil task is ignored.
func (p *WorkerPool) SubmitWait(task func()) {
	if task == nil {
		return
	}
	// Wrap the task so completion can be observed via doneChan.
	doneChan := make(chan struct{})
	p.taskQueue <- func() {
		task()
		close(doneChan)
	}
	<-doneChan
}
// dispatch sends the next queued task to an available worker, starting new
// workers as needed up to maxWorkers, and stops one idle worker whenever
// p.timeout elapses with no new work. It runs until taskQueue is closed,
// then stops all remaining workers and closes stoppedChan.
func (p *WorkerPool) dispatch() {
	defer close(p.stoppedChan)
	timeout := time.NewTimer(p.timeout)
	defer timeout.Stop()
	var workerCount int
	var task func()
	var ok bool
	var workerTaskChan chan func()
	startReady := make(chan chan func())
Loop:
	for {
		// Stop and drain the timer before Reset: if the timer fired while
		// the task case was being handled, a stale tick would sit in
		// timeout.C and make the next select kill an idle worker early.
		if !timeout.Stop() {
			select {
			case <-timeout.C:
			default:
			}
		}
		timeout.Reset(p.timeout)
		select {
		case task, ok = <-p.taskQueue:
			if !ok {
				break Loop
			}
			// Got a task to do.
			select {
			case workerTaskChan = <-p.readyWorkers:
				// A worker is ready, so give task to worker.
				workerTaskChan <- task
			default:
				// No workers ready.
				// Create a new worker, if not at max.
				if workerCount < p.maxWorkers {
					workerCount++
					go func(t func()) {
						startWorker(startReady, p.readyWorkers)
						// Submit the task once the new worker is ready.
						taskChan := <-startReady
						taskChan <- t
					}(task)
				} else {
					// Start a goroutine to submit the task when an existing
					// worker is ready.
					go func(t func()) {
						taskChan := <-p.readyWorkers
						taskChan <- t
					}(task)
				}
			}
		case <-timeout.C:
			// Timed out waiting for work to arrive. Kill a ready worker.
			if workerCount > 0 {
				select {
				case workerTaskChan = <-p.readyWorkers:
					// A worker is ready, so kill it by closing its channel.
					close(workerTaskChan)
					workerCount--
				default:
					// No work, but no ready workers. All workers are busy.
				}
			}
		}
	}

	// Stop all remaining workers as they become ready.
	for workerCount > 0 {
		workerTaskChan = <-p.readyWorkers
		close(workerTaskChan)
		workerCount--
	}
}
// startWorker starts a goroutine that executes tasks sent on its private task
// channel. The worker registers that channel once on startReady when it
// starts, and again on readyWorkers after completing each task. The worker
// exits when the dispatcher closes its task channel.
func startWorker(startReady, readyWorkers chan chan func()) {
	go func() {
		taskChan := make(chan func())
		var task func()
		var ok bool
		// Register availability on startReady channel.
		startReady <- taskChan
		for {
			// Read task from dispatcher.
			task, ok = <-taskChan
			if !ok {
				// Dispatcher has told worker to stop.
				break
			}
			// Execute the task.
			task()
			// Register availability on readyWorkers channel.
			readyWorkers <- taskChan
		}
	}()
}