2 Commits

Author SHA1 Message Date
Marc Magnin
d52d8dda07 #17 Enable use of MQTT broker as a library -> passing a logger reference to the broker instance 2018-02-08 11:13:50 +01:00
Marc Magnin
148dbbb23c #13 introduced sync.Pool 2018-02-05 15:14:46 +01:00
47 changed files with 1293 additions and 2842 deletions

View File

@@ -1,8 +0,0 @@
{
"go.lintFlags": [
"--disable=all",
"--enable=errcheck,varcheck,deadcode",
"--enable=varcheck",
"--enable=deadcode"
]
}

View File

@@ -1,12 +1,11 @@
FROM golang:1.12 as builder FROM alpine
WORKDIR /go/src/github.com/fhmq/hmq COPY hmq /
COPY . . COPY ssl /ssl
RUN CGO_ENABLED=0 go build -o hmq -a -ldflags '-extldflags "-static"' . COPY conf /conf
FROM alpine:3.8
WORKDIR /
COPY --from=builder /go/src/github.com/fhmq/hmq/hmq .
EXPOSE 1883 EXPOSE 1883
EXPOSE 1888
EXPOSE 8883
EXPOSE 1993
CMD ["/hmq"] CMD ["/hmq"]

105
README.md
View File

@@ -5,6 +5,8 @@ Free and High Performance MQTT Broker
Golang MQTT Broker, Version 3.1.1, and Compatible Golang MQTT Broker, Version 3.1.1, and Compatible
for [eclipse paho client](https://github.com/eclipse?utf8=%E2%9C%93&q=mqtt&type=&language=) and mosquitto-client for [eclipse paho client](https://github.com/eclipse?utf8=%E2%9C%93&q=mqtt&type=&language=) and mosquitto-client
Download: [click here](https://github.com/fhmq/hmq/releases)
## RUNNING ## RUNNING
```bash ```bash
$ go get github.com/fhmq/hmq $ go get github.com/fhmq/hmq
@@ -58,10 +60,8 @@ Common Options:
"certFile": "tls/server/cert.pem", "certFile": "tls/server/cert.pem",
"keyFile": "tls/server/key.pem" "keyFile": "tls/server/key.pem"
}, },
"plugins": { "acl":true,
"auth": "authhttp", "aclConf":"conf/acl.conf"
"bridge": "kafka"
}
} }
~~~ ~~~
@@ -77,28 +77,13 @@ Common Options:
* Supports will messages * Supports will messages
* Queue subscribe
* Websocket Support * Websocket Support
* TLS/SSL Support * TLS/SSL Support
* Auth Support * Flexible ACL
* Auth Connect
* Auth ACL
* Cache Support
* Kafka Bridge Support
* Action Deliver
* Regexp Deliver
* HTTP API
* Disconnect Connect (future more)
### Share SUBSCRIBE
~~~
| Prefix | Examples | Publish |
| ------------------- |-------------------------------------------|--------------------------- --|
| $share/<group>/topic | mosquitto_sub -t $share/<group>/topic | mosquitto_pub -t topic |
~~~
### Cluster ### Cluster
```bash ```bash
@@ -109,15 +94,65 @@ Common Options:
2, config router in hmq.config ("router": "127.0.0.1:9888") 2, config router in hmq.config ("router": "127.0.0.1:9888")
``` ```
Other Version Of Cluster Based On gRPC: [click here](https://github.com/fhmq/rhmq)
### Online/Offline Notification ### QUEUE SUBSCRIBE
```bash ~~~
topic: | Prefix | Examples |
$SYS/broker/connection/clients/<clientID> | ------------- |---------------------------------|
payload: | $queue/ | mosquitto_sub -t $queue/topic |
{"clientID":"client001","online":true/false,"timestamp":"2018-10-25T09:32:32Z"} ~~~
```
### ACL Configure
#### The ACL rules define:
~~~
Allow | type | value | pubsub | Topics
~~~
#### ACL Config
~~~
## type clientid , username, ipaddr
##pub 1 , sub 2, pubsub 3
## %c is clientid , %u is username
allow ip 127.0.0.1 2 $SYS/#
allow clientid 0001 3 #
allow username admin 3 #
allow username joy 3 /test,hello/world
allow clientid * 1 toCloud/%c
allow username * 1 toCloud/%u
deny clientid * 3 #
~~~
~~~
#allow local sub $SYS topic
allow ip 127.0.0.1 2 $SYS/#
~~~
~~~
#allow client who's id with 0001 or username with admin pub sub all topic
allow clientid 0001 3 #
allow username admin 3 #
~~~
~~~
#allow client with the username joy can pub sub topic '/test' and 'hello/world'
allow username joy 3 /test,hello/world
~~~
~~~
#allow all client pub the topic toCloud/{clientid/username}
allow clientid * 1 toCloud/%c
allow username * 1 toCloud/%u
~~~
~~~
#deny all client pub sub all topic
deny clientid * 3 #
~~~
Client match acl rule one by one
~~~
--------- --------- ---------
Client -> | Rule1 | --nomatch--> | Rule2 | --nomatch--> | Rule3 | -->
--------- --------- ---------
| | |
match match match
\|/ \|/ \|/
allow | deny allow | deny allow | deny
~~~
## Performance ## Performance
@@ -131,13 +166,3 @@ Other Version Of Cluster Based On gRPC: [click here](https://github.com/fhmq/rhm
## License ## License
* Apache License Version 2.0 * Apache License Version 2.0
## Reference
* Surgermq.(https://github.com/surgemq/surgemq)
## Benchmark Tool
* https://github.com/inovex/mqtt-stresser
* https://github.com/krylovsk/mqtt-benchmark

View File

@@ -1,40 +1,84 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker package broker
import ( import (
"strings" "strings"
"github.com/fhmq/hmq/lib/acl"
"go.uber.org/zap"
"github.com/fsnotify/fsnotify"
) )
const ( const (
SUB = "1" PUB = 1
PUB = "2" SUB = 2
) )
func (b *Broker) CheckTopicAuth(action, clientID, username, ip, topic string) bool { func (c *client) CheckTopicAuth(typ int, topic string) bool {
if b.auth != nil { if c.typ != CLIENT || !c.broker.config.Acl {
if strings.HasPrefix(topic, "$SYS/broker/connection/clients/") {
return true return true
} }
if strings.HasPrefix(topic, "$queue/") {
if strings.HasPrefix(topic, "$share/") && action == SUB { topic = string([]byte(topic)[7:])
substr := groupCompile.FindStringSubmatch(topic) if topic == "" {
if len(substr) != 3 {
return false return false
} }
topic = substr[2]
} }
ip := c.info.remoteIP
return b.auth.CheckACL(action, clientID, username, ip, topic) username := string(c.info.username)
} clientid := string(c.info.clientID)
aclInfo := c.broker.AclConfig
return true return acl.CheckTopicAuth(aclInfo, typ, ip, username, clientid, topic)
} }
func (b *Broker) CheckConnectAuth(clientID, username, password string) bool { var (
if b.auth != nil { watchList = []string{"./conf"}
return b.auth.CheckConnect(clientID, username, password) )
func (b *Broker) handleFsEvent(event fsnotify.Event) error {
switch event.Name {
case b.config.AclConf:
if event.Op&fsnotify.Write == fsnotify.Write ||
event.Op&fsnotify.Create == fsnotify.Create {
log.Info("text:handling acl config change event:", zap.String("filename", event.Name))
aclconfig, err := acl.AclConfigLoad(event.Name)
if err != nil {
log.Error("aclconfig change failed, load acl conf error: ", zap.Error(err))
return err
} }
b.AclConfig = aclconfig
return true }
}
return nil
}
func (b *Broker) StartAclWatcher() {
go func() {
wch, e := fsnotify.NewWatcher()
if e != nil {
log.Error("start monitor acl config file error,", zap.Error(e))
return
}
defer wch.Close()
for _, i := range watchList {
if err := wch.Add(i); err != nil {
log.Error("start monitor acl config file error,", zap.Error(err))
return
}
}
log.Info("watching acl config file change...")
for {
select {
case evt := <-wch.Events:
b.handleFsEvent(evt)
case err := <-wch.Errors:
log.Error("error:", zap.Error(err))
}
}
}()
} }

View File

@@ -1,15 +0,0 @@
package broker
import (
"github.com/fhmq/hmq/plugins/bridge"
"go.uber.org/zap"
)
func (b *Broker) Publish(e *bridge.Elements) {
if b.bridgeMQ != nil {
err := b.bridgeMQ.Publish(e)
if err != nil {
log.Error("send message to mq error.", zap.Error(err))
}
}
}

View File

@@ -1,29 +1,28 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker package broker
import ( import (
"crypto/tls" "crypto/tls"
"fmt"
"net" "net"
"net/http" "net/http"
"runtime/debug"
"sync" "sync"
"sync/atomic"
"time" "time"
"github.com/fhmq/hmq/plugins/bridge" "github.com/fhmq/hmq/lib/acl"
"github.com/fhmq/hmq/plugins/auth"
"github.com/fhmq/hmq/broker/lib/sessions"
"github.com/fhmq/hmq/broker/lib/topics"
"github.com/eclipse/paho.mqtt.golang/packets" "github.com/eclipse/paho.mqtt.golang/packets"
"github.com/fhmq/hmq/pool" "github.com/shirou/gopsutil/mem"
"go.uber.org/zap" "go.uber.org/zap"
"golang.org/x/net/websocket" "golang.org/x/net/websocket"
) )
const ( var (
MessagePoolNum = 1024 log *zap.Logger
MessagePoolMessageNum = 1024 messagePoolQueueSize = 4096
) )
type Message struct { type Message struct {
@@ -33,56 +32,32 @@ type Message struct {
type Broker struct { type Broker struct {
id string id string
cid uint64
mu sync.Mutex mu sync.Mutex
config *Config config *Config
tlsConfig *tls.Config tlsConfig *tls.Config
wpool *pool.WorkerPool AclConfig *acl.ACLConfig
dispatcher *Dispatcher
clients sync.Map clients sync.Map
routes sync.Map routes sync.Map
remotes sync.Map remotes sync.Map
nodes map[string]interface{} nodes map[string]interface{}
clusterPool chan *Message sl *Sublist
topicsMgr *topics.Manager rl *RetainList
sessionMgr *sessions.Manager queues map[string]int
auth auth.Auth
bridgeMQ bridge.BridgeMQ
} }
func newMessagePool() []chan *Message { func NewBroker(config *Config, logger *zap.Logger) (*Broker, error) {
pool := make([]chan *Message, 0) log = logger
for i := 0; i < MessagePoolNum; i++ {
ch := make(chan *Message, MessagePoolMessageNum)
pool = append(pool, ch)
}
return pool
}
func NewBroker(config *Config) (*Broker, error) {
if config == nil {
config = DefaultConfig
}
b := &Broker{ b := &Broker{
id: GenUniqueId(), id: GenUniqueId(),
config: config, config: config,
wpool: pool.New(config.Worker), dispatcher: NewDispatcher(),
sl: NewSublist(),
rl: NewRetainList(),
nodes: make(map[string]interface{}), nodes: make(map[string]interface{}),
clusterPool: make(chan *Message), queues: make(map[string]int),
} }
var err error
b.topicsMgr, err = topics.NewManager("mem")
if err != nil {
log.Error("new topic manager error", zap.Error(err))
return nil, err
}
b.sessionMgr, err = sessions.NewManager("mem")
if err != nil {
log.Error("new session manager error", zap.Error(err))
return nil, err
}
if b.config.TlsPort != "" { if b.config.TlsPort != "" {
tlsconfig, err := NewTLSConfig(b.config.TlsInfo) tlsconfig, err := NewTLSConfig(b.config.TlsInfo)
if err != nil { if err != nil {
@@ -91,25 +66,20 @@ func NewBroker(config *Config) (*Broker, error) {
} }
b.tlsConfig = tlsconfig b.tlsConfig = tlsconfig
} }
if b.config.Acl {
b.auth = auth.NewAuth(b.config.Plugin.Auth) aclconfig, err := acl.AclConfigLoad(b.config.AclConf)
b.bridgeMQ = bridge.NewBridgeMQ(b.config.Plugin.Bridge) if err != nil {
log.Error("Load acl conf error", zap.Error(err))
return nil, err
}
b.AclConfig = aclconfig
b.StartAclWatcher()
}
return b, nil return b, nil
} }
func (b *Broker) SubmitWork(clientId string, msg *Message) { func (b *Broker) DispatchMessage(msg *Message) {
if b.wpool == nil { b.dispatcher.Dispatch(msg)
b.wpool = pool.New(b.config.Worker)
}
if msg.client.typ == CLUSTER {
b.clusterPool <- msg
} else {
b.wpool.Submit(clientId, func() {
ProcessMessage(msg)
})
}
} }
@@ -119,10 +89,6 @@ func (b *Broker) Start() {
return return
} }
if b.config.HTTPPort != "" {
go InitHTTPMoniter(b)
}
//listen clinet over tcp //listen clinet over tcp
if b.config.Port != "" { if b.config.Port != "" {
go b.StartClientListening(false) go b.StartClientListening(false)
@@ -145,10 +111,25 @@ func (b *Broker) Start() {
//connect on other node in cluster //connect on other node in cluster
if b.config.Router != "" { if b.config.Router != "" {
go b.processClusterInfo()
b.ConnectToDiscovery() b.ConnectToDiscovery()
} }
//system monitor
go StateMonitor()
}
func StateMonitor() {
v, _ := mem.VirtualMemory()
timeSticker := time.NewTicker(time.Second * 30)
for {
select {
case <-timeSticker.C:
if v.UsedPercent > 75 {
debug.FreeOSMemory()
}
}
}
} }
func (b *Broker) StartWebsocketListening() { func (b *Broker) StartWebsocketListening() {
@@ -170,8 +151,9 @@ func (b *Broker) StartWebsocketListening() {
func (b *Broker) wsHandler(ws *websocket.Conn) { func (b *Broker) wsHandler(ws *websocket.Conn) {
// io.Copy(ws, ws) // io.Copy(ws, ws)
atomic.AddUint64(&b.cid, 1)
ws.PayloadType = websocket.BinaryFrame ws.PayloadType = websocket.BinaryFrame
b.handleConnection(CLIENT, ws) b.handleConnection(CLIENT, ws, b.cid)
} }
func (b *Broker) StartClientListening(Tls bool) { func (b *Broker) StartClientListening(Tls bool) {
@@ -209,11 +191,41 @@ func (b *Broker) StartClientListening(Tls bool) {
continue continue
} }
tmpDelay = ACCEPT_MIN_SLEEP tmpDelay = ACCEPT_MIN_SLEEP
go b.handleConnection(CLIENT, conn) atomic.AddUint64(&b.cid, 1)
go b.handleConnection(CLIENT, conn, b.cid)
} }
} }
func (b *Broker) Handshake(conn net.Conn) bool {
nc := tls.Server(conn, b.tlsConfig)
time.AfterFunc(DEFAULT_TLS_TIMEOUT, func() { TlsTimeout(nc) })
nc.SetReadDeadline(time.Now().Add(DEFAULT_TLS_TIMEOUT))
// Force handshake
if err := nc.Handshake(); err != nil {
log.Error("TLS handshake error, ", zap.Error(err))
return false
}
nc.SetReadDeadline(time.Time{})
return true
}
func TlsTimeout(conn *tls.Conn) {
nc := conn
// Check if already closed
if nc == nil {
return
}
cs := nc.ConnectionState()
if !cs.HandshakeComplete {
log.Error("TLS handshake timeout")
nc.Close()
}
}
func (b *Broker) StartClusterListening() { func (b *Broker) StartClusterListening() {
var hp string = b.config.Cluster.Host + ":" + b.config.Cluster.Port var hp string = b.config.Cluster.Host + ":" + b.config.Cluster.Port
log.Info("Start Listening cluster on ", zap.String("hp", hp)) log.Info("Start Listening cluster on ", zap.String("hp", hp))
@@ -224,6 +236,7 @@ func (b *Broker) StartClusterListening() {
return return
} }
var idx uint64 = 0
tmpDelay := 10 * ACCEPT_MIN_SLEEP tmpDelay := 10 * ACCEPT_MIN_SLEEP
for { for {
conn, err := l.Accept() conn, err := l.Accept()
@@ -243,11 +256,11 @@ func (b *Broker) StartClusterListening() {
} }
tmpDelay = ACCEPT_MIN_SLEEP tmpDelay = ACCEPT_MIN_SLEEP
go b.handleConnection(ROUTER, conn) go b.handleConnection(ROUTER, conn, idx)
} }
} }
func (b *Broker) handleConnection(typ int, conn net.Conn) { func (b *Broker) handleConnection(typ int, conn net.Conn, idx uint64) {
//process connect packet //process connect packet
packet, err := packets.ReadPacket(conn) packet, err := packets.ReadPacket(conn)
if err != nil { if err != nil {
@@ -263,32 +276,9 @@ func (b *Broker) handleConnection(typ int, conn net.Conn) {
log.Error("received msg that was not Connect") log.Error("received msg that was not Connect")
return return
} }
log.Info("read connect from ", zap.String("clientID", msg.ClientIdentifier))
connack := packets.NewControlPacket(packets.Connack).(*packets.ConnackPacket) connack := packets.NewControlPacket(packets.Connack).(*packets.ConnackPacket)
connack.ReturnCode = packets.Accepted
connack.SessionPresent = msg.CleanSession connack.SessionPresent = msg.CleanSession
connack.ReturnCode = msg.Validate()
if connack.ReturnCode != packets.Accepted {
err = connack.Write(conn)
if err != nil {
log.Error("send connack error, ", zap.Error(err), zap.String("clientID", msg.ClientIdentifier))
return
}
return
}
if typ == CLIENT && !b.CheckConnectAuth(string(msg.ClientIdentifier), string(msg.Username), string(msg.Password)) {
connack.ReturnCode = packets.ErrRefusedNotAuthorised
err = connack.Write(conn)
if err != nil {
log.Error("send connack error, ", zap.Error(err), zap.String("clientID", msg.ClientIdentifier))
return
}
return
}
err = connack.Write(conn) err = connack.Write(conn)
if err != nil { if err != nil {
log.Error("send connack error, ", zap.Error(err), zap.String("clientID", msg.ClientIdentifier)) log.Error("send connack error, ", zap.Error(err), zap.String("clientID", msg.ClientIdentifier))
@@ -322,12 +312,6 @@ func (b *Broker) handleConnection(typ int, conn net.Conn) {
c.init() c.init()
err = b.getSession(c, msg, connack)
if err != nil {
log.Error("get session error: ", zap.String("clientID", c.info.clientID))
return
}
cid := c.info.clientID cid := c.info.clientID
var exist bool var exist bool
@@ -344,16 +328,6 @@ func (b *Broker) handleConnection(typ int, conn net.Conn) {
} }
} }
b.clients.Store(cid, c) b.clients.Store(cid, c)
b.OnlineOfflineNotification(cid, true)
{
b.Publish(&bridge.Elements{
ClientID: string(msg.ClientIdentifier),
Username: string(msg.Username),
Action: bridge.Connect,
Timestamp: time.Now().Unix(),
})
}
case ROUTER: case ROUTER:
old, exist = b.routes.Load(cid) old, exist = b.routes.Load(cid)
if exist { if exist {
@@ -417,18 +391,6 @@ func (b *Broker) ConnectToDiscovery() {
go c.StartPing() go c.StartPing()
} }
func (b *Broker) processClusterInfo() {
for {
msg, ok := <-b.clusterPool
if !ok {
log.Error("read message from cluster channel error")
return
}
ProcessMessage(msg)
}
}
func (b *Broker) connectRouter(id, addr string) { func (b *Broker) connectRouter(id, addr string) {
var conn net.Conn var conn net.Conn
var err error var err error
@@ -489,7 +451,6 @@ func (b *Broker) connectRouter(id, addr string) {
c.SendConnect() c.SendConnect()
// mpool := b.messagePool[fnv1a.HashString64(cid)%MessagePoolNum]
go c.readLoop() go c.readLoop()
go c.StartPing() go c.StartPing()
@@ -538,9 +499,9 @@ func (b *Broker) SendLocalSubsToRouter(c *client) {
b.clients.Range(func(key, value interface{}) bool { b.clients.Range(func(key, value interface{}) bool {
client, ok := value.(*client) client, ok := value.(*client)
if ok { if ok {
subs := client.subMap subs := client.subs
for _, sub := range subs { for _, sub := range subs {
subInfo.Topics = append(subInfo.Topics, sub.topic) subInfo.Topics = append(subInfo.Topics, string(sub.topic))
subInfo.Qoss = append(subInfo.Qoss, sub.qos) subInfo.Qoss = append(subInfo.Qoss, sub.qos)
} }
} }
@@ -596,22 +557,17 @@ func (b *Broker) removeClient(c *client) {
} }
func (b *Broker) PublishMessage(packet *packets.PublishPacket) { func (b *Broker) PublishMessage(packet *packets.PublishPacket) {
var subs []interface{} topic := packet.TopicName
var qoss []byte r := b.sl.Match(topic)
b.mu.Lock() if len(r.psubs) == 0 {
err := b.topicsMgr.Subscribers([]byte(packet.TopicName), packet.Qos, &subs, &qoss)
b.mu.Unlock()
if err != nil {
log.Error("search sub client error, ", zap.Error(err))
return return
} }
for _, sub := range subs { for _, sub := range r.psubs {
s, ok := sub.(*subscription) if sub != nil {
if ok { err := sub.client.WriterPacket(packet)
err := s.client.WriterPacket(packet)
if err != nil { if err != nil {
log.Error("write message error, ", zap.Error(err)) log.Error("process message for psub error, ", zap.Error(err))
} }
} }
} }
@@ -628,12 +584,3 @@ func (b *Broker) BroadcastUnSubscribe(subs map[string]*subscription) {
b.BroadcastSubOrUnsubMessage(unsub) b.BroadcastSubOrUnsubMessage(unsub)
} }
} }
func (b *Broker) OnlineOfflineNotification(clientID string, online bool) {
packet := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
packet.TopicName = "$SYS/broker/connection/clients/" + clientID
packet.Qos = 0
packet.Payload = []byte(fmt.Sprintf(`{"clientID":"%s","online":%v,"timestamp":"%s"}`, clientID, online, time.Now().UTC().Format(time.RFC3339)))
b.PublishMessage(packet)
}

View File

@@ -1,20 +1,14 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker package broker
import ( import (
"context"
"errors"
"math/rand"
"net" "net"
"reflect" "reflect"
"regexp"
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/fhmq/hmq/broker/lib/sessions"
"github.com/fhmq/hmq/broker/lib/topics"
"github.com/fhmq/hmq/plugins/bridge"
"github.com/eclipse/paho.mqtt.golang/packets" "github.com/eclipse/paho.mqtt.golang/packets"
"go.uber.org/zap" "go.uber.org/zap"
) )
@@ -30,20 +24,11 @@ const (
REMOTE = 2 REMOTE = 2
CLUSTER = 3 CLUSTER = 3
) )
const (
_GroupTopicRegexp = `^\$share/([0-9a-zA-Z_-]+)/(.*)$`
)
const ( const (
Connected = 1 Connected = 1
Disconnected = 2 Disconnected = 2
) )
var (
groupCompile = regexp.MustCompile(_GroupTopicRegexp)
)
type client struct { type client struct {
typ int typ int
mu sync.Mutex mu sync.Mutex
@@ -52,23 +37,22 @@ type client struct {
info info info info
route route route route
status int status int
ctx context.Context closed chan int
cancelFunc context.CancelFunc smu sync.RWMutex
session *sessions.Session subs map[string]*subscription
subMap map[string]*subscription rsubs map[string]*subInfo
topicsMgr *topics.Manager }
subs []interface{}
qoss []byte type subInfo struct {
rmsgs []*packets.PublishPacket sub *subscription
routeSubMap map[string]uint64 num int
} }
type subscription struct { type subscription struct {
client *client client *client
topic string topic string
qos byte qos byte
share bool queue bool
groupName string
} }
type info struct { type info struct {
@@ -88,63 +72,71 @@ type route struct {
var ( var (
DisconnectdPacket = packets.NewControlPacket(packets.Disconnect).(*packets.DisconnectPacket) DisconnectdPacket = packets.NewControlPacket(packets.Disconnect).(*packets.DisconnectPacket)
r = rand.New(rand.NewSource(time.Now().UnixNano()))
) )
func (c *client) init() { func (c *client) init() {
c.smu.Lock()
defer c.smu.Unlock()
c.status = Connected c.status = Connected
c.info.localIP, _, _ = net.SplitHostPort(c.conn.LocalAddr().String()) c.closed = make(chan int, 1)
c.info.remoteIP, _, _ = net.SplitHostPort(c.conn.RemoteAddr().String())
c.ctx, c.cancelFunc = context.WithCancel(context.Background()) c.rsubs = make(map[string]*subInfo)
c.subMap = make(map[string]*subscription) c.subs = make(map[string]*subscription, 10)
c.topicsMgr = c.broker.topicsMgr c.info.localIP = strings.Split(c.conn.LocalAddr().String(), ":")[0]
c.info.remoteIP = strings.Split(c.conn.RemoteAddr().String(), ":")[0]
}
func (c *client) keepAlive(ch chan int) {
defer close(ch)
keepalive := time.Duration(c.info.keepalive*3/2) * time.Second
timer := time.NewTimer(keepalive)
for {
select {
case <-ch:
timer.Reset(keepalive)
case <-timer.C:
if c.typ == REMOTE || c.typ == CLUSTER {
timer.Reset(keepalive)
continue
}
log.Error("Client exceeded timeout, disconnecting. ", zap.String("ClientID", c.info.clientID), zap.Uint16("keepalive", c.info.keepalive))
c.broker.DispatchMessage(&Message{client: c, packet: DisconnectdPacket})
timer.Stop()
return
case _, ok := <-c.closed:
if !ok {
return
}
}
}
} }
func (c *client) readLoop() { func (c *client) readLoop() {
nc := c.conn nc := c.conn
b := c.broker if nc == nil {
if nc == nil || b == nil {
return return
} }
keepAlive := time.Second * time.Duration(c.info.keepalive) ch := make(chan int, 1000)
timeOut := keepAlive + (keepAlive / 2) go c.keepAlive(ch)
for { for {
select {
case <-c.ctx.Done():
return
default:
//add read timeout
if err := nc.SetReadDeadline(time.Now().Add(timeOut)); err != nil {
log.Error("set read timeout error: ", zap.Error(err), zap.String("ClientID", c.info.clientID))
msg := &Message{
client: c,
packet: DisconnectdPacket,
}
b.SubmitWork(c.info.clientID, msg)
return
}
packet, err := packets.ReadPacket(nc) packet, err := packets.ReadPacket(nc)
if err != nil { if err != nil {
log.Error("read packet error: ", zap.Error(err), zap.String("ClientID", c.info.clientID)) log.Error("read packet error: ", zap.Error(err), zap.String("ClientID", c.info.clientID))
msg := &Message{ break
client: c,
packet: DisconnectdPacket,
}
b.SubmitWork(c.info.clientID, msg)
return
} }
// keepalive channel
ch <- 1
msg := &Message{ c.broker.DispatchMessage(&Message{
client: c, client: c,
packet: packet, packet: packet,
} })
b.SubmitWork(c.info.clientID, msg)
}
} }
c.broker.DispatchMessage(&Message{client: c, packet: DisconnectdPacket})
} }
func ProcessMessage(msg *Message) { func ProcessMessage(msg *Message) {
@@ -154,10 +146,7 @@ func ProcessMessage(msg *Message) {
return return
} }
if c.typ == CLIENT {
log.Debug("Recv message:", zap.String("message type", reflect.TypeOf(msg.packet).String()[9:]), zap.String("ClientID", c.info.clientID)) log.Debug("Recv message:", zap.String("message type", reflect.TypeOf(msg.packet).String()[9:]), zap.String("ClientID", c.info.clientID))
}
switch ca.(type) { switch ca.(type) {
case *packets.ConnackPacket: case *packets.ConnackPacket:
case *packets.ConnectPacket: case *packets.ConnectPacket:
@@ -187,74 +176,21 @@ func ProcessMessage(msg *Message) {
} }
func (c *client) ProcessPublish(packet *packets.PublishPacket) { func (c *client) ProcessPublish(packet *packets.PublishPacket) {
switch c.typ {
case CLIENT:
c.processClientPublish(packet)
case ROUTER:
c.processRouterPublish(packet)
case CLUSTER:
c.processRemotePublish(packet)
}
}
func (c *client) processRemotePublish(packet *packets.PublishPacket) {
if c.status == Disconnected { if c.status == Disconnected {
return return
} }
topic := packet.TopicName topic := packet.TopicName
if topic == BrokerInfoTopic { if topic == BrokerInfoTopic && c.typ == CLUSTER {
c.ProcessInfo(packet) c.ProcessInfo(packet)
return return
} }
} if !c.CheckTopicAuth(PUB, topic) {
func (c *client) processRouterPublish(packet *packets.PublishPacket) {
if c.status == Disconnected {
return
}
switch packet.Qos {
case QosAtMostOnce:
c.ProcessPublishMessage(packet)
case QosAtLeastOnce:
puback := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
puback.MessageID = packet.MessageID
if err := c.WriterPacket(puback); err != nil {
log.Error("send puback error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
return
}
c.ProcessPublishMessage(packet)
case QosExactlyOnce:
return
default:
log.Error("publish with unknown qos", zap.String("ClientID", c.info.clientID))
return
}
}
func (c *client) processClientPublish(packet *packets.PublishPacket) {
topic := packet.TopicName
if !c.broker.CheckTopicAuth(PUB, c.info.clientID, c.info.username, c.info.remoteIP, topic) {
log.Error("Pub Topics Auth failed, ", zap.String("topic", topic), zap.String("ClientID", c.info.clientID)) log.Error("Pub Topics Auth failed, ", zap.String("topic", topic), zap.String("ClientID", c.info.clientID))
return return
} }
//publish kafka
c.broker.Publish(&bridge.Elements{
ClientID: c.info.clientID,
Username: c.info.username,
Action: bridge.Publish,
Timestamp: time.Now().Unix(),
Payload: string(packet.Payload),
Topic: topic,
})
switch packet.Qos { switch packet.Qos {
case QosAtMostOnce: case QosAtMostOnce:
c.ProcessPublishMessage(packet) c.ProcessPublishMessage(packet)
@@ -272,71 +208,105 @@ func (c *client) processClientPublish(packet *packets.PublishPacket) {
log.Error("publish with unknown qos", zap.String("ClientID", c.info.clientID)) log.Error("publish with unknown qos", zap.String("ClientID", c.info.clientID))
return return
} }
if packet.Retain {
if b := c.broker; b != nil {
err := b.rl.Insert(topic, packet)
if err != nil {
log.Error("Insert Retain Message error: ", zap.Error(err), zap.String("ClientID", c.info.clientID))
}
}
}
} }
func (c *client) ProcessPublishMessage(packet *packets.PublishPacket) { func (c *client) ProcessPublishMessage(packet *packets.PublishPacket) {
if c.status == Disconnected {
return
}
b := c.broker b := c.broker
if b == nil { if b == nil {
return return
} }
typ := c.typ typ := c.typ
topic := packet.TopicName
if packet.Retain { r := b.sl.Match(topic)
if err := c.topicsMgr.Retain(packet); err != nil { // log.Info("psubs num: ", len(r.psubs))
log.Error("Error retaining message: ", zap.Error(err), zap.String("ClientID", c.info.clientID)) if len(r.qsubs) == 0 && len(r.psubs) == 0 {
}
}
err := c.topicsMgr.Subscribers([]byte(packet.TopicName), packet.Qos, &c.subs, &c.qoss)
if err != nil {
log.Error("Error retrieving subscribers list: ", zap.String("ClientID", c.info.clientID))
return return
} }
// fmt.Println("psubs num: ", len(c.subs)) for _, sub := range r.psubs {
if len(c.subs) == 0 { if sub.client.typ == ROUTER {
return
}
var qsub []int
for i, sub := range c.subs {
s, ok := sub.(*subscription)
if ok {
if s.client.typ == ROUTER {
if typ != CLIENT { if typ != CLIENT {
continue continue
} }
} }
if s.share { if sub != nil {
qsub = append(qsub, i) err := sub.client.WriterPacket(packet)
if err != nil {
log.Error("process message for psub error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
}
}
}
pre := -1
now := -1
t := "$queue/" + topic
cnt, exist := b.queues[t]
if exist {
// log.Info("queue index : ", cnt)
for _, sub := range r.qsubs {
if sub.client.typ == ROUTER {
if typ != CLIENT {
continue
}
}
if c.typ == CLIENT {
now = now + 1
} else { } else {
publish(s, packet) now = now + sub.client.rsubs[t].num
}
if cnt > pre && cnt <= now {
if sub != nil {
err := sub.client.WriterPacket(packet)
if err != nil {
log.Error("send publish error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
}
} }
break
}
pre = now
}
} }
length := getQueueSubscribeNum(r.qsubs)
if length > 0 {
b.queues[t] = (b.queues[t] + 1) % length
} }
}
if len(qsub) > 0 { func getQueueSubscribeNum(qsubs []*subscription) int {
idx := r.Intn(len(qsub)) topic := "$queue/"
sub := c.subs[qsub[idx]].(*subscription) if len(qsubs) < 1 {
publish(sub, packet) return 0
} else {
topic = topic + qsubs[0].topic
} }
num := 0
for _, sub := range qsubs {
if sub.client.typ == CLIENT {
num = num + 1
} else {
num = num + sub.client.rsubs[topic].num
}
}
return num
} }
func (c *client) ProcessSubscribe(packet *packets.SubscribePacket) { func (c *client) ProcessSubscribe(packet *packets.SubscribePacket) {
switch c.typ {
case CLIENT:
c.processClientSubscribe(packet)
case ROUTER:
c.processRouterSubscribe(packet)
}
}
func (c *client) processClientSubscribe(packet *packets.SubscribePacket) {
if c.status == Disconnected { if c.status == Disconnected {
return return
} }
@@ -355,61 +325,60 @@ func (c *client) processClientSubscribe(packet *packets.SubscribePacket) {
for i, topic := range topics { for i, topic := range topics {
t := topic t := topic
//check topic auth for client //check topic auth for client
if !b.CheckTopicAuth(SUB, c.info.clientID, c.info.username, c.info.remoteIP, topic) { if !c.CheckTopicAuth(SUB, topic) {
log.Error("Sub topic Auth failed: ", zap.String("topic", topic), zap.String("ClientID", c.info.clientID)) log.Error("Sub topic Auth failed: ", zap.String("topic", topic), zap.String("ClientID", c.info.clientID))
retcodes = append(retcodes, QosFailure) retcodes = append(retcodes, QosFailure)
continue continue
} }
b.Publish(&bridge.Elements{ queue := strings.HasPrefix(topic, "$queue/")
ClientID: c.info.clientID, if queue {
Username: c.info.username, if len(t) > 7 {
Action: bridge.Subscribe, t = t[7:]
Timestamp: time.Now().Unix(), if _, exists := b.queues[topic]; !exists {
Topic: topic, b.queues[topic] = 0
}) }
} else {
groupName := ""
share := false
if strings.HasPrefix(topic, "$share/") {
substr := groupCompile.FindStringSubmatch(topic)
if len(substr) != 3 {
retcodes = append(retcodes, QosFailure) retcodes = append(retcodes, QosFailure)
continue continue
} }
share = true
groupName = substr[1]
topic = substr[2]
} }
if oldSub, exist := c.subMap[t]; exist {
c.topicsMgr.Unsubscribe([]byte(oldSub.topic), oldSub)
delete(c.subMap, t)
}
sub := &subscription{ sub := &subscription{
topic: topic, topic: t,
qos: qoss[i], qos: qoss[i],
client: c, client: c,
share: share, queue: queue,
groupName: groupName,
} }
switch c.typ {
case CLIENT:
if _, exist := c.subs[topic]; !exist {
c.subs[topic] = sub
rqos, err := c.topicsMgr.Subscribe([]byte(topic), qoss[i], sub) } else {
if err != nil { //if exist ,check whether qos change
log.Error("subscribe error, ", zap.Error(err), zap.String("ClientID", c.info.clientID)) c.subs[topic].qos = qoss[i]
retcodes = append(retcodes, QosFailure) retcodes = append(retcodes, qoss[i])
continue continue
} }
case ROUTER:
if subinfo, exist := c.rsubs[topic]; !exist {
sinfo := &subInfo{sub: sub, num: 1}
c.rsubs[topic] = sinfo
c.subMap[t] = sub } else {
subinfo.num = subinfo.num + 1
c.session.AddTopic(t, qoss[i]) retcodes = append(retcodes, qoss[i])
retcodes = append(retcodes, rqos) continue
c.topicsMgr.Retained([]byte(topic), &c.rmsgs) }
}
err := b.sl.Insert(sub)
if err != nil {
log.Error("Insert subscription error: ", zap.Error(err), zap.String("ClientID", c.info.clientID))
retcodes = append(retcodes, QosFailure)
} else {
retcodes = append(retcodes, qoss[i])
}
} }
suback.ReturnCodes = retcodes suback.ReturnCodes = retcodes
err := c.WriterPacket(suback) err := c.WriterPacket(suback)
@@ -418,150 +387,52 @@ func (c *client) processClientSubscribe(packet *packets.SubscribePacket) {
return return
} }
//broadcast subscribe message //broadcast subscribe message
if c.typ == CLIENT {
go b.BroadcastSubOrUnsubMessage(packet) go b.BroadcastSubOrUnsubMessage(packet)
}
//process retain message //process retain message
for _, rm := range c.rmsgs { for _, t := range topics {
if err := c.WriterPacket(rm); err != nil { packets := b.rl.Match(t)
log.Error("Error publishing retained message:", zap.Any("err", err), zap.String("ClientID", c.info.clientID)) for _, packet := range packets {
} else {
log.Info("process retain message: ", zap.Any("packet", packet), zap.String("ClientID", c.info.clientID)) log.Info("process retain message: ", zap.Any("packet", packet), zap.String("ClientID", c.info.clientID))
if packet != nil {
c.WriterPacket(packet)
} }
} }
}
func (c *client) processRouterSubscribe(packet *packets.SubscribePacket) {
if c.status == Disconnected {
return
}
b := c.broker
if b == nil {
return
}
topics := packet.Topics
qoss := packet.Qoss
suback := packets.NewControlPacket(packets.Suback).(*packets.SubackPacket)
suback.MessageID = packet.MessageID
var retcodes []byte
for i, topic := range topics {
t := topic
groupName := ""
share := false
if strings.HasPrefix(topic, "$share/") {
substr := groupCompile.FindStringSubmatch(topic)
if len(substr) != 3 {
retcodes = append(retcodes, QosFailure)
continue
}
share = true
groupName = substr[1]
topic = substr[2]
}
sub := &subscription{
topic: topic,
qos: qoss[i],
client: c,
share: share,
groupName: groupName,
}
rqos, err := c.topicsMgr.Subscribe([]byte(topic), qoss[i], sub)
if err != nil {
log.Error("subscribe error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
retcodes = append(retcodes, QosFailure)
continue
}
c.subMap[t] = sub
addSubMap(c.routeSubMap, topic)
retcodes = append(retcodes, rqos)
}
suback.ReturnCodes = retcodes
err := c.WriterPacket(suback)
if err != nil {
log.Error("send suback error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
return
} }
} }
func (c *client) ProcessUnSubscribe(packet *packets.UnsubscribePacket) { func (c *client) ProcessUnSubscribe(packet *packets.UnsubscribePacket) {
switch c.typ { if c.status == Disconnected {
return
}
b := c.broker
if b == nil {
return
}
typ := c.typ
topics := packet.Topics
for _, t := range topics {
switch typ {
case CLIENT: case CLIENT:
c.processClientUnSubscribe(packet) sub, ok := c.subs[t]
if ok {
c.unsubscribe(sub)
}
case ROUTER: case ROUTER:
c.processRouterUnSubscribe(packet) subinfo, ok := c.rsubs[t]
if ok {
subinfo.num = subinfo.num - 1
if subinfo.num < 1 {
delete(c.rsubs, t)
c.unsubscribe(subinfo.sub)
} else {
c.rsubs[t] = subinfo
} }
}
func (c *client) processRouterUnSubscribe(packet *packets.UnsubscribePacket) {
if c.status == Disconnected {
return
} }
b := c.broker
if b == nil {
return
}
topics := packet.Topics
for _, topic := range topics {
sub, exist := c.subMap[topic]
if exist {
retainNum := delSubMap(c.routeSubMap, topic)
if retainNum > 0 {
continue
}
c.topicsMgr.Unsubscribe([]byte(sub.topic), sub)
delete(c.subMap, topic)
}
}
unsuback := packets.NewControlPacket(packets.Unsuback).(*packets.UnsubackPacket)
unsuback.MessageID = packet.MessageID
err := c.WriterPacket(unsuback)
if err != nil {
log.Error("send unsuback error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
return
}
}
func (c *client) processClientUnSubscribe(packet *packets.UnsubscribePacket) {
if c.status == Disconnected {
return
}
b := c.broker
if b == nil {
return
}
topics := packet.Topics
for _, topic := range topics {
{
//publish kafka
b.Publish(&bridge.Elements{
ClientID: c.info.clientID,
Username: c.info.username,
Action: bridge.Unsubscribe,
Timestamp: time.Now().Unix(),
Topic: topic,
})
}
sub, exist := c.subMap[topic]
if exist {
c.topicsMgr.Unsubscribe([]byte(sub.topic), sub)
c.session.RemoveTopic(topic)
delete(c.subMap, topic)
} }
} }
@@ -575,7 +446,22 @@ func (c *client) processClientUnSubscribe(packet *packets.UnsubscribePacket) {
return return
} }
// //process ubsubscribe message // //process ubsubscribe message
if c.typ == CLIENT {
b.BroadcastSubOrUnsubMessage(packet) b.BroadcastSubOrUnsubMessage(packet)
}
}
func (c *client) unsubscribe(sub *subscription) {
if c.typ == CLIENT {
delete(c.subs, sub.topic)
}
b := c.broker
if b != nil && sub != nil {
b.sl.Remove(sub)
}
} }
func (c *client) ProcessPing() { func (c *client) ProcessPing() {
@@ -591,47 +477,37 @@ func (c *client) ProcessPing() {
} }
func (c *client) Close() { func (c *client) Close() {
c.smu.Lock()
if c.status == Disconnected { if c.status == Disconnected {
c.smu.Unlock()
return return
} }
c.cancelFunc()
c.status = Disconnected
//wait for message complete //wait for message complete
// time.Sleep(1 * time.Second) time.Sleep(1 * time.Second)
// c.status = Disconnected c.status = Disconnected
b := c.broker
b.Publish(&bridge.Elements{
ClientID: c.info.clientID,
Username: c.info.username,
Action: bridge.Disconnect,
Timestamp: time.Now().Unix(),
})
if c.conn != nil { if c.conn != nil {
c.conn.Close() c.conn.Close()
c.conn = nil c.conn = nil
} }
subs := c.subMap c.smu.Unlock()
close(c.closed)
b := c.broker
subs := c.subs
if b != nil { if b != nil {
b.removeClient(c) b.removeClient(c)
for _, sub := range subs { for _, sub := range subs {
err := b.topicsMgr.Unsubscribe([]byte(sub.topic), sub) err := b.sl.Remove(sub)
if err != nil { if err != nil {
log.Error("unsubscribe error, ", zap.Error(err), zap.String("ClientID", c.info.clientID)) log.Error("closed client but remove sublist error, ", zap.Error(err), zap.String("ClientID", c.info.clientID))
} }
} }
if c.typ == CLIENT { if c.typ == CLIENT {
b.BroadcastUnSubscribe(subs) b.BroadcastUnSubscribe(subs)
//offline notification
b.OnlineOfflineNotification(c.info.clientID, false)
} }
if c.info.willMsg != nil { if c.info.willMsg != nil {
b.PublishMessage(c.info.willMsg) b.PublishMessage(c.info.willMsg)
} }
@@ -648,17 +524,9 @@ func (c *client) Close() {
} }
func (c *client) WriterPacket(packet packets.ControlPacket) error { func (c *client) WriterPacket(packet packets.ControlPacket) error {
if c.status == Disconnected {
return nil
}
if packet == nil { if packet == nil {
return nil return nil
} }
if c.conn == nil {
c.Close()
return errors.New("connect lost ....")
}
c.mu.Lock() c.mu.Lock()
err := packet.Write(c.conn) err := packet.Write(c.conn)

View File

@@ -1,15 +1,17 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker package broker
import ( import (
"encoding/json" "crypto/md5"
"crypto/rand"
"encoding/base64"
"encoding/hex"
"errors"
"io"
"reflect" "reflect"
"strings"
"time" "time"
"github.com/tidwall/gjson"
"go.uber.org/zap"
"github.com/eclipse/paho.mqtt.golang/packets"
uuid "github.com/satori/go.uuid"
) )
const ( const (
@@ -46,6 +48,47 @@ const (
QosFailure = 0x80 QosFailure = 0x80
) )
func SubscribeTopicCheckAndSpilt(topic string) ([]string, error) {
if strings.Index(topic, "#") != -1 && strings.Index(topic, "#") != len(topic)-1 {
return nil, errors.New("Topic format error with index of #")
}
re := strings.Split(topic, "/")
for i, v := range re {
if i != 0 && i != (len(re)-1) {
if v == "" {
return nil, errors.New("Topic format error with index of //")
}
if strings.Contains(v, "+") && v != "+" {
return nil, errors.New("Topic format error with index of +")
}
} else {
if v == "" {
re[i] = "/"
}
}
}
return re, nil
}
func PublishTopicCheckAndSpilt(topic string) ([]string, error) {
if strings.Index(topic, "#") != -1 || strings.Index(topic, "+") != -1 {
return nil, errors.New("Publish Topic format error with + and #")
}
re := strings.Split(topic, "/")
for i, v := range re {
if v == "" {
if i != 0 && i != (len(re)-1) {
return nil, errors.New("Topic format error with index of //")
} else {
re[i] = "/"
}
}
}
return re, nil
}
func equal(k1, k2 interface{}) bool { func equal(k1, k2 interface{}) bool {
if reflect.TypeOf(k1) != reflect.TypeOf(k2) { if reflect.TypeOf(k1) != reflect.TypeOf(k2) {
return false return false
@@ -91,65 +134,13 @@ func equal(k1, k2 interface{}) bool {
return false return false
} }
func addSubMap(m map[string]uint64, topic string) {
subNum, exist := m[topic]
if exist {
m[topic] = subNum + 1
} else {
m[topic] = 1
}
}
func delSubMap(m map[string]uint64, topic string) uint64 {
subNum, exist := m[topic]
if exist {
if subNum > 1 {
m[topic] = subNum - 1
return subNum - 1
}
} else {
m[topic] = 0
}
return 0
}
func GenUniqueId() string { func GenUniqueId() string {
return uuid.NewV4().String() b := make([]byte, 48)
} if _, err := io.ReadFull(rand.Reader, b); err != nil {
return ""
func wrapPublishPacket(packet *packets.PublishPacket) *packets.PublishPacket {
p := packet.Copy()
wrapPayload := map[string]interface{}{
"message_id": GenUniqueId(),
"payload": string(p.Payload),
}
b, _ := json.Marshal(wrapPayload)
p.Payload = b
return p
}
func unWrapPublishPacket(packet *packets.PublishPacket) *packets.PublishPacket {
p := packet.Copy()
if gjson.GetBytes(p.Payload, "payload").Exists() {
p.Payload = []byte(gjson.GetBytes(p.Payload, "payload").String())
}
return p
}
func publish(sub *subscription, packet *packets.PublishPacket) {
// var p *packets.PublishPacket
// if sub.client.info.username != "root" {
// p = unWrapPublishPacket(packet)
// } else {
// p = wrapPublishPacket(packet)
// }
// err := sub.client.WriterPacket(p)
// if err != nil {
// log.Error("process message for psub error, ", zap.Error(err))
// }
err := sub.client.WriterPacket(packet)
if err != nil {
log.Error("process message for psub error, ", zap.Error(err))
} }
h := md5.New()
h.Write([]byte(base64.URLEncoding.EncodeToString(b)))
return hex.EncodeToString(h.Sum(nil))
// return GetMd5String()
} }

View File

@@ -1,3 +1,5 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker package broker
import ( import (
@@ -16,7 +18,6 @@ import (
type Config struct { type Config struct {
Worker int `json:"workerNum"` Worker int `json:"workerNum"`
HTTPPort string `json:"httpPort"`
Host string `json:"host"` Host string `json:"host"`
Port string `json:"port"` Port string `json:"port"`
Cluster RouteInfo `json:"cluster"` Cluster RouteInfo `json:"cluster"`
@@ -27,13 +28,9 @@ type Config struct {
WsPort string `json:"wsPort"` WsPort string `json:"wsPort"`
WsTLS bool `json:"wsTLS"` WsTLS bool `json:"wsTLS"`
TlsInfo TLSInfo `json:"tlsInfo"` TlsInfo TLSInfo `json:"tlsInfo"`
Debug bool `json:"debug"` Acl bool `json:"acl"`
Plugin Plugins `json:"plugins"` AclConf string `json:"aclConf"`
} Debug bool `json:"-"`
type Plugins struct {
Auth string
Bridge string
} }
type RouteInfo struct { type RouteInfo struct {
@@ -52,12 +49,9 @@ var DefaultConfig *Config = &Config{
Worker: 4096, Worker: 4096,
Host: "0.0.0.0", Host: "0.0.0.0",
Port: "1883", Port: "1883",
Acl: false,
} }
var (
log = logger.Prod().Named("broker")
)
func showHelp() { func showHelp() {
fmt.Printf("%s\n", usageStr) fmt.Printf("%s\n", usageStr)
os.Exit(0) os.Exit(0)
@@ -76,8 +70,6 @@ func ConfigureConfig(args []string) (*Config, error) {
fs.BoolVar(&help, "help", false, "Show this message.") fs.BoolVar(&help, "help", false, "Show this message.")
fs.IntVar(&config.Worker, "w", 1024, "worker num to process message, perfer (client num)/10.") fs.IntVar(&config.Worker, "w", 1024, "worker num to process message, perfer (client num)/10.")
fs.IntVar(&config.Worker, "worker", 1024, "worker num to process message, perfer (client num)/10.") fs.IntVar(&config.Worker, "worker", 1024, "worker num to process message, perfer (client num)/10.")
fs.StringVar(&config.HTTPPort, "httpport", "8080", "Port to listen on.")
fs.StringVar(&config.HTTPPort, "hp", "8080", "Port to listen on.")
fs.StringVar(&config.Port, "port", "1883", "Port to listen on.") fs.StringVar(&config.Port, "port", "1883", "Port to listen on.")
fs.StringVar(&config.Port, "p", "1883", "Port to listen on.") fs.StringVar(&config.Port, "p", "1883", "Port to listen on.")
fs.StringVar(&config.Host, "host", "0.0.0.0", "Network host to listen on") fs.StringVar(&config.Host, "host", "0.0.0.0", "Network host to listen on")
@@ -112,6 +104,9 @@ func ConfigureConfig(args []string) (*Config, error) {
} }
}) })
logger.InitLogger(config.Debug)
log = logger.Get().Named("Broker")
if configFile != "" { if configFile != "" {
tmpConfig, e := LoadConfig(configFile) tmpConfig, e := LoadConfig(configFile)
if e != nil { if e != nil {
@@ -121,10 +116,6 @@ func ConfigureConfig(args []string) (*Config, error) {
} }
} }
if config.Debug {
log = logger.Debug().Named("broker")
}
if err := config.check(); err != nil { if err := config.check(); err != nil {
return nil, err return nil, err
} }
@@ -137,7 +128,7 @@ func LoadConfig(filename string) (*Config, error) {
content, err := ioutil.ReadFile(filename) content, err := ioutil.ReadFile(filename)
if err != nil { if err != nil {
// log.Error("Read config file error: ", zap.Error(err)) log.Error("Read config file error: ", zap.Error(err))
return nil, err return nil, err
} }
// log.Info(string(content)) // log.Info(string(content))
@@ -145,7 +136,7 @@ func LoadConfig(filename string) (*Config, error) {
var config Config var config Config
err = json.Unmarshal(content, &config) err = json.Unmarshal(content, &config)
if err != nil { if err != nil {
// log.Error("Unmarshal config file error: ", zap.Error(err)) log.Error("Unmarshal config file error: ", zap.Error(err))
return nil, err return nil, err
} }

25
broker/dispatcher.go Normal file
View File

@@ -0,0 +1,25 @@
package broker
import (
"sync"
)
// Dispatcher will delegate ProcessMessage func to multiple goroutines
type Dispatcher struct {
workerPool *sync.Pool
}
// NewDispatcher create a *Dispatcher instance
func NewDispatcher() *Dispatcher {
return &Dispatcher{workerPool: &sync.Pool{
New: func() interface{} {
return NewWorker()
},
},
}
}
// Dispatch a message to the workers
func (d *Dispatcher) Dispatch(message *Message) {
d.workerPool.Get().(Worker).WorkerChannel <- Work{WorkerPool: d.workerPool, Message: message}
}

View File

@@ -1,26 +0,0 @@
package broker
import (
"github.com/gin-gonic/gin"
)
func InitHTTPMoniter(b *Broker) {
gin.SetMode(gin.ReleaseMode)
router := gin.Default()
router.DELETE("api/v1/connections/:clientid", func(c *gin.Context) {
clientid := c.Param("clientid")
cli, ok := b.clients.Load(clientid)
if ok {
conn, succss := cli.(*client)
if succss {
conn.Close()
}
}
resp := map[string]int{
"code": 0,
}
c.JSON(200, &resp)
})
router.Run(":" + b.config.HTTPPort)
}

View File

@@ -1,12 +1,15 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker package broker
import ( import (
"fmt" "fmt"
"time" "time"
simplejson "github.com/bitly/go-simplejson"
"github.com/eclipse/paho.mqtt.golang/packets" "github.com/eclipse/paho.mqtt.golang/packets"
"go.uber.org/zap" "go.uber.org/zap"
simplejson "github.com/bitly/go-simplejson"
) )
func (c *client) SendInfo() { func (c *client) SendInfo() {
@@ -34,10 +37,12 @@ func (c *client) StartPing() {
log.Error("ping error: ", zap.Error(err)) log.Error("ping error: ", zap.Error(err))
c.Close() c.Close()
} }
case <-c.ctx.Done(): case _, ok := <-c.closed:
if !ok {
return return
} }
} }
}
} }
func (c *client) SendConnect() { func (c *client) SendConnect() {

View File

@@ -1,62 +0,0 @@
package sessions
import (
"fmt"
"sync"
)
var _ SessionsProvider = (*memProvider)(nil)
func init() {
Register("mem", NewMemProvider())
}
// memProvider is an in-memory SessionsProvider: sessions live in a map
// guarded by a read/write mutex and are lost when the process exits.
type memProvider struct {
	st map[string]*Session // sessions keyed by session/client id
	mu sync.RWMutex        // guards st
}
// NewMemProvider returns an empty in-memory session store.
func NewMemProvider() *memProvider {
	return &memProvider{
		st: make(map[string]*Session),
	}
}
// New creates the session stored under id and returns it.
// NOTE(review): an existing session under the same id is silently
// overwritten here — confirm this takeover behavior is intended.
func (this *memProvider) New(id string) (*Session, error) {
	this.mu.Lock()
	defer this.mu.Unlock()
	this.st[id] = &Session{id: id}
	return this.st[id], nil
}
// Get returns the session stored under id, or an error when no session
// is registered for that key.
func (p *memProvider) Get(id string) (*Session, error) {
	p.mu.RLock()
	defer p.mu.RUnlock()
	if sess, ok := p.st[id]; ok {
		return sess, nil
	}
	return nil, fmt.Errorf("store/Get: No session found for key %s", id)
}
// Del removes the session stored under id; deleting a missing key is a
// no-op.
func (this *memProvider) Del(id string) {
	this.mu.Lock()
	defer this.mu.Unlock()
	delete(this.st, id)
}
// Save is a no-op for the in-memory provider: the stored *Session values
// are the live objects, so there is nothing extra to persist.
func (this *memProvider) Save(id string) error {
	return nil
}
// Count reports the number of sessions currently stored.
//
// The read lock is taken here: every other method mutates st under mu,
// so reading len(st) without it was a data race.
func (this *memProvider) Count() int {
	this.mu.RLock()
	defer this.mu.RUnlock()
	return len(this.st)
}
// Close discards all stored sessions by replacing the map with a fresh
// one. It always returns nil.
//
// The write lock is taken here: swapping st while another goroutine
// reads or writes it was a data race.
func (this *memProvider) Close() error {
	this.mu.Lock()
	defer this.mu.Unlock()
	this.st = make(map[string]*Session)
	return nil
}

View File

@@ -1,149 +0,0 @@
package sessions
import (
"fmt"
"sync"
"github.com/eclipse/paho.mqtt.golang/packets"
)
const (
// Queue size for the ack queue
defaultQueueSize = 16
)
type Session struct {
// cmsg is the CONNECT message
cmsg *packets.ConnectPacket
// Will message to publish if connect is closed unexpectedly
Will *packets.PublishPacket
// Retained publish message
Retained *packets.PublishPacket
// topics stores all the topis for this session/client
topics map[string]byte
// Initialized?
initted bool
// Serialize access to this session
mu sync.Mutex
id string
}
// Init populates the session from a CONNECT packet. It may be called
// only once per Session; a second call returns an error. On success the
// will message (if the WillFlag is set), the topic map, and the session
// id are all initialized.
func (this *Session) Init(msg *packets.ConnectPacket) error {
	this.mu.Lock()
	defer this.mu.Unlock()
	if this.initted {
		return fmt.Errorf("Session already initialized")
	}
	this.cmsg = msg
	if this.cmsg.WillFlag {
		// Build the will PUBLISH from the CONNECT fields.
		this.Will = packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
		// NOTE(review): Qos is copied from the packet's Qos field, not a
		// dedicated WillQos field — confirm this is the intended source.
		this.Will.Qos = this.cmsg.Qos
		this.Will.TopicName = this.cmsg.WillTopic
		this.Will.Payload = this.cmsg.WillMessage
		this.Will.Retain = this.cmsg.WillRetain
	}
	this.topics = make(map[string]byte, 1)
	this.id = string(msg.ClientIdentifier)
	this.initted = true
	return nil
}
func (this *Session) Update(msg *packets.ConnectPacket) error {
this.mu.Lock()
defer this.mu.Unlock()
this.cmsg = msg
return nil
}
func (this *Session) RetainMessage(msg *packets.PublishPacket) error {
this.mu.Lock()
defer this.mu.Unlock()
this.Retained = msg
return nil
}
// AddTopic records a subscription topic and its granted QoS for this
// session. It fails when the session has not been initialized yet.
func (s *Session) AddTopic(topic string, qos byte) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.initted {
		s.topics[topic] = qos
		return nil
	}
	return fmt.Errorf("Session not yet initialized")
}
// RemoveTopic drops a previously recorded subscription topic. It fails
// when the session has not been initialized yet; removing an unknown
// topic is a no-op.
func (s *Session) RemoveTopic(topic string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.initted {
		delete(s.topics, topic)
		return nil
	}
	return fmt.Errorf("Session not yet initialized")
}
// Topics returns the session's subscribed topic names and their QoS
// levels as two parallel slices. It fails when the session has not been
// initialized yet. Slice order follows map iteration and is therefore
// unspecified.
func (s *Session) Topics() ([]string, []byte, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if !s.initted {
		return nil, nil, fmt.Errorf("Session not yet initialized")
	}
	var (
		names  []string
		levels []byte
	)
	for name, level := range s.topics {
		names = append(names, name)
		levels = append(levels, level)
	}
	return names, levels, nil
}
// ID returns the client identifier from the session's CONNECT packet.
//
// The mutex is held because Update can replace cmsg concurrently;
// reading it unguarded (as before) was a data race. Calling ID before
// Init still panics on the nil cmsg, as it did previously.
func (this *Session) ID() string {
	this.mu.Lock()
	defer this.mu.Unlock()
	return this.cmsg.ClientIdentifier
}
func (this *Session) WillFlag() bool {
this.mu.Lock()
defer this.mu.Unlock()
return this.cmsg.WillFlag
}
func (this *Session) SetWillFlag(v bool) {
this.mu.Lock()
defer this.mu.Unlock()
this.cmsg.WillFlag = v
}
func (this *Session) CleanSession() bool {
this.mu.Lock()
defer this.mu.Unlock()
return this.cmsg.CleanSession
}

View File

@@ -1,92 +0,0 @@
package sessions
import (
"crypto/rand"
"encoding/base64"
"errors"
"fmt"
"io"
)
var (
ErrSessionsProviderNotFound = errors.New("Session: Session provider not found")
ErrKeyNotAvailable = errors.New("Session: not item found for key.")
providers = make(map[string]SessionsProvider)
)
type SessionsProvider interface {
New(id string) (*Session, error)
Get(id string) (*Session, error)
Del(id string)
Save(id string) error
Count() int
Close() error
}
// Register makes a session provider available by the provided name.
// If a Register is called twice with the same name or if the driver is nil,
// it panics.
func Register(name string, provider SessionsProvider) {
if provider == nil {
panic("session: Register provide is nil")
}
if _, dup := providers[name]; dup {
panic("session: Register called twice for provider " + name)
}
providers[name] = provider
}
func Unregister(name string) {
delete(providers, name)
}
type Manager struct {
p SessionsProvider
}
func NewManager(providerName string) (*Manager, error) {
p, ok := providers[providerName]
if !ok {
return nil, fmt.Errorf("session: unknown provider %q", providerName)
}
return &Manager{p: p}, nil
}
func (this *Manager) New(id string) (*Session, error) {
if id == "" {
id = this.sessionId()
}
return this.p.New(id)
}
func (this *Manager) Get(id string) (*Session, error) {
return this.p.Get(id)
}
func (this *Manager) Del(id string) {
this.p.Del(id)
}
func (this *Manager) Save(id string) error {
return this.p.Save(id)
}
func (this *Manager) Count() int {
return this.p.Count()
}
func (this *Manager) Close() error {
return this.p.Close()
}
// sessionId generates a random URL-safe identifier from 15 bytes of
// crypto/rand output (20 base64 characters).
// On a rand read failure it returns the empty string; callers do not
// currently distinguish that case.
func (manager *Manager) sessionId() string {
	b := make([]byte, 15)
	if _, err := io.ReadFull(rand.Reader, b); err != nil {
		return ""
	}
	return base64.URLEncoding.EncodeToString(b)
}

View File

@@ -1,550 +0,0 @@
package topics
import (
"fmt"
"reflect"
"sync"
"github.com/eclipse/paho.mqtt.golang/packets"
)
const (
QosAtMostOnce byte = iota
QosAtLeastOnce
QosExactlyOnce
QosFailure = 0x80
)
var _ TopicsProvider = (*memTopics)(nil)
type memTopics struct {
// Sub/unsub mutex
smu sync.RWMutex
// Subscription tree
sroot *snode
// Retained message mutex
rmu sync.RWMutex
// Retained messages topic tree
rroot *rnode
}
func init() {
Register("mem", NewMemProvider())
}
// NewMemProvider returns an new instance of the memTopics, which is implements the
// TopicsProvider interface. memProvider is a hidden struct that stores the topic
// subscriptions and retained messages in memory. The content is not persistend so
// when the server goes, everything will be gone. Use with care.
func NewMemProvider() *memTopics {
return &memTopics{
sroot: newSNode(),
rroot: newRNode(),
}
}
// ValidQos reports whether qos is one of the three MQTT QoS levels.
func ValidQos(qos byte) bool {
	switch qos {
	case QosAtMostOnce, QosAtLeastOnce, QosExactlyOnce:
		return true
	default:
		return false
	}
}
// Subscribe inserts sub into the subscription tree under topic and
// returns the granted QoS. It rejects invalid QoS values and nil
// subscribers with QosFailure and an error.
func (this *memTopics) Subscribe(topic []byte, qos byte, sub interface{}) (byte, error) {
	if !ValidQos(qos) {
		return QosFailure, fmt.Errorf("Invalid QoS %d", qos)
	}
	if sub == nil {
		return QosFailure, fmt.Errorf("Subscriber cannot be nil")
	}
	this.smu.Lock()
	defer this.smu.Unlock()
	// The previous clamp `if qos > QosExactlyOnce { qos = QosExactlyOnce }`
	// was unreachable: ValidQos above already rejects any qos above 2.
	if err := this.sroot.sinsert(topic, qos, sub); err != nil {
		return QosFailure, err
	}
	return qos, nil
}
func (this *memTopics) Unsubscribe(topic []byte, sub interface{}) error {
this.smu.Lock()
defer this.smu.Unlock()
return this.sroot.sremove(topic, sub)
}
// Returned values will be invalidated by the next Subscribers call
func (this *memTopics) Subscribers(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error {
if !ValidQos(qos) {
return fmt.Errorf("Invalid QoS %d", qos)
}
this.smu.RLock()
defer this.smu.RUnlock()
*subs = (*subs)[0:0]
*qoss = (*qoss)[0:0]
return this.sroot.smatch(topic, qos, subs, qoss)
}
func (this *memTopics) Retain(msg *packets.PublishPacket) error {
this.rmu.Lock()
defer this.rmu.Unlock()
// So apparently, at least according to the MQTT Conformance/Interoperability
// Testing, that a payload of 0 means delete the retain message.
// https://eclipse.org/paho/clients/testing/
if len(msg.Payload) == 0 {
return this.rroot.rremove([]byte(msg.TopicName))
}
return this.rroot.rinsertOrUpdate([]byte(msg.TopicName), msg)
}
func (this *memTopics) Retained(topic []byte, msgs *[]*packets.PublishPacket) error {
this.rmu.RLock()
defer this.rmu.RUnlock()
return this.rroot.rmatch(topic, msgs)
}
func (this *memTopics) Close() error {
this.sroot = nil
this.rroot = nil
return nil
}
// subscrition nodes
type snode struct {
// If this is the end of the topic string, then add subscribers here
subs []interface{}
qos []byte
// Otherwise add the next topic level here
snodes map[string]*snode
}
func newSNode() *snode {
return &snode{
snodes: make(map[string]*snode),
}
}
func (this *snode) sinsert(topic []byte, qos byte, sub interface{}) error {
// If there's no more topic levels, that means we are at the matching snode
// to insert the subscriber. So let's see if there's such subscriber,
// if so, update it. Otherwise insert it.
if len(topic) == 0 {
// Let's see if the subscriber is already on the list. If yes, update
// QoS and then return.
for i := range this.subs {
if equal(this.subs[i], sub) {
this.qos[i] = qos
return nil
}
}
// Otherwise add.
this.subs = append(this.subs, sub)
this.qos = append(this.qos, qos)
return nil
}
// Not the last level, so let's find or create the next level snode, and
// recursively call it's insert().
// ntl = next topic level
ntl, rem, err := nextTopicLevel(topic)
if err != nil {
return err
}
level := string(ntl)
// Add snode if it doesn't already exist
n, ok := this.snodes[level]
if !ok {
n = newSNode()
this.snodes[level] = n
}
return n.sinsert(rem, qos, sub)
}
// This remove implementation ignores the QoS, as long as the subscriber
// matches then it's removed
func (this *snode) sremove(topic []byte, sub interface{}) error {
// If the topic is empty, it means we are at the final matching snode. If so,
// let's find the matching subscribers and remove them.
if len(topic) == 0 {
// If subscriber == nil, then it's signal to remove ALL subscribers
if sub == nil {
this.subs = this.subs[0:0]
this.qos = this.qos[0:0]
return nil
}
// If we find the subscriber then remove it from the list. Technically
// we just overwrite the slot by shifting all other items up by one.
for i := range this.subs {
if equal(this.subs[i], sub) {
this.subs = append(this.subs[:i], this.subs[i+1:]...)
this.qos = append(this.qos[:i], this.qos[i+1:]...)
return nil
}
}
return fmt.Errorf("No topic found for subscriber")
}
// Not the last level, so let's find the next level snode, and recursively
// call it's remove().
// ntl = next topic level
ntl, rem, err := nextTopicLevel(topic)
if err != nil {
return err
}
level := string(ntl)
// Find the snode that matches the topic level
n, ok := this.snodes[level]
if !ok {
return fmt.Errorf("No topic found")
}
// Remove the subscriber from the next level snode
if err := n.sremove(rem, sub); err != nil {
return err
}
// If there are no more subscribers and snodes to the next level we just visited
// let's remove it
if len(n.subs) == 0 && len(n.snodes) == 0 {
delete(this.snodes, level)
}
return nil
}
// smatch() returns all the subscribers that are subscribed to the topic. Given a topic
// with no wildcards (publish topic), it returns a list of subscribers that subscribes
// to the topic. For each of the level names, it's a match
// - if there are subscribers to '#', then all the subscribers are added to result set
func (this *snode) smatch(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error {
// If the topic is empty, it means we are at the final matching snode. If so,
// let's find the subscribers that match the qos and append them to the list.
if len(topic) == 0 {
this.matchQos(qos, subs, qoss)
if mwcn, _ := this.snodes[MWC]; mwcn != nil {
mwcn.matchQos(qos, subs, qoss)
}
return nil
}
// ntl = next topic level
ntl, rem, err := nextTopicLevel(topic)
if err != nil {
return err
}
level := string(ntl)
for k, n := range this.snodes {
// If the key is "#", then these subscribers are added to the result set
if k == MWC {
n.matchQos(qos, subs, qoss)
} else if k == SWC || k == level {
if err := n.smatch(rem, qos, subs, qoss); err != nil {
return err
}
}
}
return nil
}
// retained message nodes
type rnode struct {
// If this is the end of the topic string, then add retained messages here
msg *packets.PublishPacket
// Otherwise add the next topic level here
rnodes map[string]*rnode
}
func newRNode() *rnode {
return &rnode{
rnodes: make(map[string]*rnode),
}
}
func (this *rnode) rinsertOrUpdate(topic []byte, msg *packets.PublishPacket) error {
// If there's no more topic levels, that means we are at the matching rnode.
if len(topic) == 0 {
// Reuse the message if possible
this.msg = msg
return nil
}
// Not the last level, so let's find or create the next level snode, and
// recursively call it's insert().
// ntl = next topic level
ntl, rem, err := nextTopicLevel(topic)
if err != nil {
return err
}
level := string(ntl)
// Add snode if it doesn't already exist
n, ok := this.rnodes[level]
if !ok {
n = newRNode()
this.rnodes[level] = n
}
return n.rinsertOrUpdate(rem, msg)
}
// Remove the retained message for the supplied topic
func (this *rnode) rremove(topic []byte) error {
// If the topic is empty, it means we are at the final matching rnode. If so,
// let's remove the buffer and message.
if len(topic) == 0 {
this.msg = nil
return nil
}
// Not the last level, so let's find the next level rnode, and recursively
// call it's remove().
// ntl = next topic level
ntl, rem, err := nextTopicLevel(topic)
if err != nil {
return err
}
level := string(ntl)
// Find the rnode that matches the topic level
n, ok := this.rnodes[level]
if !ok {
return fmt.Errorf("No topic found")
}
// Remove the subscriber from the next level rnode
if err := n.rremove(rem); err != nil {
return err
}
// If there are no more rnodes to the next level we just visited let's remove it
if len(n.rnodes) == 0 {
delete(this.rnodes, level)
}
return nil
}
// rmatch() finds the retained messages for the topic and qos provided. It's somewhat
// of a reverse match compare to match() since the supplied topic can contain
// wildcards, whereas the retained message topic is a full (no wildcard) topic.
func (this *rnode) rmatch(topic []byte, msgs *[]*packets.PublishPacket) error {
// If the topic is empty, it means we are at the final matching rnode. If so,
// add the retained msg to the list.
if len(topic) == 0 {
if this.msg != nil {
*msgs = append(*msgs, this.msg)
}
return nil
}
// ntl = next topic level
ntl, rem, err := nextTopicLevel(topic)
if err != nil {
return err
}
level := string(ntl)
if level == MWC {
// If '#', add all retained messages starting this node
this.allRetained(msgs)
} else if level == SWC {
// If '+', check all nodes at this level. Next levels must be matched.
for _, n := range this.rnodes {
if err := n.rmatch(rem, msgs); err != nil {
return err
}
}
} else {
// Otherwise, find the matching node, go to the next level
if n, ok := this.rnodes[level]; ok {
if err := n.rmatch(rem, msgs); err != nil {
return err
}
}
}
return nil
}
func (this *rnode) allRetained(msgs *[]*packets.PublishPacket) {
if this.msg != nil {
*msgs = append(*msgs, this.msg)
}
for _, n := range this.rnodes {
n.allRetained(msgs)
}
}
const (
stateCHR byte = iota // Regular character
stateMWC // Multi-level wildcard
stateSWC // Single-level wildcard
stateSEP // Topic level separator
stateSYS // System level topic ($)
)
// Returns topic level, remaining topic levels and any errors
func nextTopicLevel(topic []byte) ([]byte, []byte, error) {
s := stateCHR
for i, c := range topic {
switch c {
case '/':
if s == stateMWC {
return nil, nil, fmt.Errorf("Multi-level wildcard found in topic and it's not at the last level")
}
if i == 0 {
return []byte(SWC), topic[i+1:], nil
}
return topic[:i], topic[i+1:], nil
case '#':
if i != 0 {
return nil, nil, fmt.Errorf("Wildcard character '#' must occupy entire topic level")
}
s = stateMWC
case '+':
if i != 0 {
return nil, nil, fmt.Errorf("Wildcard character '+' must occupy entire topic level")
}
s = stateSWC
// case '$':
// if i == 0 {
// return nil, nil, fmt.Errorf("Cannot publish to $ topics")
// }
// s = stateSYS
default:
if s == stateMWC || s == stateSWC {
return nil, nil, fmt.Errorf("Wildcard characters '#' and '+' must occupy entire topic level")
}
s = stateCHR
}
}
// If we got here that means we didn't hit the separator along the way, so the
// topic is either empty, or does not contain a separator. Either way, we return
// the full topic
return topic, nil, nil
}
// The QoS of the payload messages sent in response to a subscription must be the
// minimum of the QoS of the originally published message (in this case, it's the
// qos parameter) and the maximum QoS granted by the server (in this case, it's
// the QoS in the topic tree).
//
// It's also possible that even if the topic matches, the subscriber is not included
// due to the QoS granted is lower than the published message QoS. For example,
// if the client is granted only QoS 0, and the publish message is QoS 1, then this
// client is not to be send the published message.
func (this *snode) matchQos(qos byte, subs *[]interface{}, qoss *[]byte) {
for _, sub := range this.subs {
// If the published QoS is higher than the subscriber QoS, then we skip the
// subscriber. Otherwise, add to the list.
// if qos >= this.qos[i] {
*subs = append(*subs, sub)
*qoss = append(*qoss, qos)
// }
}
}
// equal reports whether two subscriber keys are the same value.
// Keys of different dynamic types are never equal. Function values
// are compared by code pointer, because Go functions do not support
// ==. Values of uncomparable types (slices, maps) are reported as
// not equal instead of panicking.
//
// Fixes two defects in the original:
//   - `return &k1 == &k2` compared the addresses of the two local
//     interface parameters, which are always distinct, so function
//     keys never compared equal;
//   - `k1 == k2` panicked at runtime for uncomparable dynamic types.
//
// The long per-type switch was redundant: interface == already
// compares comparable dynamic values correctly.
func equal(k1, k2 interface{}) bool {
	t1, t2 := reflect.TypeOf(k1), reflect.TypeOf(k2)
	if t1 != t2 {
		return false
	}
	if t1 == nil {
		// Both arguments are untyped nil interfaces.
		return true
	}
	if t1.Kind() == reflect.Func {
		// Compare the function code pointers, not parameter addresses.
		return reflect.ValueOf(k1).Pointer() == reflect.ValueOf(k2).Pointer()
	}
	if !t1.Comparable() {
		// Slices, maps, etc. would make == panic; treat as not equal.
		return false
	}
	return k1 == k2
}

View File

@@ -1,91 +0,0 @@
package topics
import (
"fmt"
"github.com/eclipse/paho.mqtt.golang/packets"
)
const (
	// MWC is the multi-level wildcard.
	MWC = "#"
	// SWC is the single-level wildcard.
	SWC = "+"
	// SEP is the topic level separator.
	SEP = "/"
	// SYS is the starting character of system-level topics.
	SYS = "$"
	// _WC contains both wildcard characters.
	_WC = "#+"
)

var (
	// providers holds every registered TopicsProvider, keyed by the
	// name passed to Register.
	providers = make(map[string]TopicsProvider)
)
// TopicsProvider is the pluggable backend for subscription storage,
// subscriber matching, and retained-message handling.
type TopicsProvider interface {
	// Subscribe registers subscriber for topic and returns the granted QoS.
	Subscribe(topic []byte, qos byte, subscriber interface{}) (byte, error)
	// Unsubscribe removes subscriber from topic.
	Unsubscribe(topic []byte, subscriber interface{}) error
	// Subscribers appends subscribers matching topic (and their QoS) to subs/qoss.
	Subscribers(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error
	// Retain stores msg as the retained message for its topic.
	Retain(msg *packets.PublishPacket) error
	// Retained appends retained messages matching topic to msgs.
	Retained(topic []byte, msgs *[]*packets.PublishPacket) error
	// Close releases any resources held by the provider.
	Close() error
}
// Register makes a TopicsProvider available under the given name,
// following the database/sql driver-registration convention: it
// panics on a nil provider or a duplicate name.
func Register(name string, provider TopicsProvider) {
	if provider == nil {
		panic("topics: Register provide is nil")
	}
	_, dup := providers[name]
	if dup {
		panic("topics: Register called twice for provider " + name)
	}
	providers[name] = provider
}
// Unregister removes the named provider from the registry.
// Removing a name that was never registered is a no-op.
func Unregister(name string) {
	delete(providers, name)
}
// Manager dispatches topic-tree operations to a registered
// TopicsProvider implementation.
type Manager struct {
	p TopicsProvider
}

// NewManager returns a Manager backed by the provider registered
// under providerName, or an error if no such provider exists.
func NewManager(providerName string) (*Manager, error) {
	p, ok := providers[providerName]
	if !ok {
		// Error prefix corrected from "session:" (copy-paste from the
		// session manager) to this package's name.
		return nil, fmt.Errorf("topics: unknown provider %q", providerName)
	}
	return &Manager{p: p}, nil
}
// Subscribe delegates to the underlying provider.
func (m *Manager) Subscribe(topic []byte, qos byte, subscriber interface{}) (byte, error) {
	return m.p.Subscribe(topic, qos, subscriber)
}

// Unsubscribe delegates to the underlying provider.
func (m *Manager) Unsubscribe(topic []byte, subscriber interface{}) error {
	return m.p.Unsubscribe(topic, subscriber)
}

// Subscribers delegates to the underlying provider.
func (m *Manager) Subscribers(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error {
	return m.p.Subscribers(topic, qos, subs, qoss)
}

// Retain delegates to the underlying provider.
func (m *Manager) Retain(msg *packets.PublishPacket) error {
	return m.p.Retain(msg)
}

// Retained delegates to the underlying provider.
func (m *Manager) Retained(topic []byte, msgs *[]*packets.PublishPacket) error {
	return m.p.Retained(topic, msgs)
}

// Close delegates to the underlying provider.
func (m *Manager) Close() error {
	return m.p.Close()
}

122
broker/retain.go Normal file
View File

@@ -0,0 +1,122 @@
package broker
import (
"sync"
"github.com/eclipse/paho.mqtt.golang/packets"
)
// RetainList stores retained messages in a topic trie guarded by a
// read-write mutex.
type RetainList struct {
	sync.RWMutex
	root *rlevel
}

// rlevel is one level of the retain trie: children keyed by topic token.
type rlevel struct {
	nodes map[string]*rnode
}

// rnode holds the retained message for one exact topic (msg may be
// nil for purely intermediate nodes) and the next trie level.
type rnode struct {
	next *rlevel
	msg *packets.PublishPacket
}

// RetainResult accumulates matched retained messages during a walk.
type RetainResult struct {
	msg []*packets.PublishPacket
}

// newRNode returns an empty retain node.
func newRNode() *rnode {
	return &rnode{}
}

// newRLevel returns an empty retain trie level.
func newRLevel() *rlevel {
	return &rlevel{nodes: make(map[string]*rnode)}
}

// NewRetainList returns an empty, ready-to-use RetainList.
func NewRetainList() *RetainList {
	return &RetainList{root: newRLevel()}
}
// Insert stores buf as the retained message for topic, creating
// intermediate trie levels as needed. Any previously retained
// message for the same topic is overwritten.
func (r *RetainList) Insert(topic string, buf *packets.PublishPacket) error {
	tokens, err := PublishTopicCheckAndSpilt(topic)
	if err != nil {
		return err
	}
	// Guard against an empty token list: the original dereferenced a
	// nil node below in that case.
	if len(tokens) == 0 {
		return nil
	}
	r.Lock()
	// Deferred so no future early return can leak the lock.
	defer r.Unlock()
	l := r.root
	var n *rnode
	for _, t := range tokens {
		n = l.nodes[t]
		if n == nil {
			n = newRNode()
			l.nodes[t] = n
		}
		if n.next == nil {
			n.next = newRLevel()
		}
		l = n.next
	}
	n.msg = buf
	return nil
}
// Match returns every retained message whose topic matches the
// subscription topic (which may contain '+' and '#').
// Returns nil when topic fails validation.
func (r *RetainList) Match(topic string) []*packets.PublishPacket {
	tokens, err := SubscribeTopicCheckAndSpilt(topic)
	if err != nil {
		return nil
	}
	results := &RetainResult{}
	// The trie is only read during matching, so a read lock suffices
	// (the original took the write lock, serializing all matches).
	r.RLock()
	matchRLevel(r.root, tokens, results)
	r.RUnlock()
	return results.msg
}
// matchRLevel walks one retain-trie level, collecting into results
// the retained messages matched by the subscription tokens toks.
func matchRLevel(l *rlevel, toks []string, results *RetainResult) {
	var n *rnode
	for i, t := range toks {
		if l == nil {
			return
		}
		if t == "#" {
			// Multi-level wildcard: everything at and below this level.
			for _, child := range l.nodes {
				child.GetAll(results)
			}
		}
		if t == "+" {
			// Single-level wildcard: branch into every child.
			for _, child := range l.nodes {
				// FIX: the original tested len(t[i+1:]) — the remaining
				// characters of the token STRING — instead of the
				// remaining topic tokens toks[i+1:].
				if len(toks[i+1:]) == 0 {
					// Only append a real retained message; intermediate
					// nodes have a nil msg.
					if child.msg != nil {
						results.msg = append(results.msg, child.msg)
					}
				} else {
					matchRLevel(child.next, toks[i+1:], results)
				}
			}
		}
		n = l.nodes[t]
		if n != nil {
			l = n.next
		} else {
			l = nil
		}
	}
	// Exact-path match: guard against appending a nil message from a
	// purely intermediate node.
	if n != nil && n.msg != nil {
		results.msg = append(results.msg, n.msg)
	}
}
// GetAll appends this node's retained message (if any) and those of
// every descendant node to results.
func (r *rnode) GetAll(results *RetainResult) {
	if r.msg != nil {
		results.msg = append(results.msg, r.msg)
	}
	// Defensive nil check: Insert always attaches a level to every
	// node it creates, but a nil next would have panicked here.
	if r.next == nil {
		return
	}
	for _, n := range r.next.nodes {
		n.GetAll(results)
	}
}

View File

@@ -1,53 +0,0 @@
package broker
import "github.com/eclipse/paho.mqtt.golang/packets"
// getSession attaches a session to cli per the MQTT 3.1.1
// CleanSession rules:
//
//   - CleanSession == 0: resume the stored session for the client
//     identifier if one exists; otherwise create a new one.
//   - CleanSession == 1: discard any previous state and start a
//     brand-new session lasting only for this network connection.
//
// resp.SessionPresent tells the client which case applied.
func (b *Broker) getSession(cli *client, req *packets.ConnectPacket, resp *packets.ConnackPacket) error {
	// A client that supplies no identifier is forced onto a clean session.
	if len(req.ClientIdentifier) == 0 {
		req.CleanSession = true
	}
	cid := req.ClientIdentifier

	// Try to resume an existing session when the client asked for one.
	if !req.CleanSession {
		if existing, err := b.sessionMgr.Get(cid); err == nil {
			cli.session = existing
			resp.SessionPresent = true
			if err := existing.Update(req); err != nil {
				return err
			}
		}
	}

	if cli.session != nil {
		return nil
	}

	// Clean session requested, or no stored session was found.
	sess, err := b.sessionMgr.New(cid)
	if err != nil {
		return err
	}
	cli.session = sess
	resp.SessionPresent = false
	return sess.Init(req)
}

318
broker/sublist.go Normal file
View File

@@ -0,0 +1,318 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker
import (
"errors"
"sync"
"go.uber.org/zap"
)
// SublistResult is a match result split into plain and queue
// subscriptions, a layout better optimized for queue subs.
type SublistResult struct {
	psubs []*subscription
	qsubs []*subscription // don't make this a map, too expensive to iterate
}

// A Sublist stores and efficiently retrieves subscriptions, keeping
// a topic-keyed cache of recent Match results.
type Sublist struct {
	sync.RWMutex
	cache map[string]*SublistResult
	root *level
}

// A node contains the subscriptions registered at one trie position
// and a pointer to the next level.
type node struct {
	next *level
	psubs []*subscription
	qsubs []*subscription
}

// A level is one trie level: child nodes keyed by topic token
// (including the wildcard tokens "#" and "+").
type level struct {
	nodes map[string]*node
}

// newNode creates an empty node with small pre-sized slices.
func newNode() *node {
	return &node{psubs: make([]*subscription, 0, 4), qsubs: make([]*subscription, 0, 4)}
}

// newLevel creates an empty level backed by a plain Go map.
func newLevel() *level {
	return &level{nodes: make(map[string]*node)}
}

// NewSublist creates an empty, ready-to-use Sublist.
func NewSublist() *Sublist {
	return &Sublist{root: newLevel(), cache: make(map[string]*SublistResult)}
}
// Insert adds sub into the sublist, creating trie levels as needed.
// If an equal subscription already exists it is replaced in place
// (and the cache is left untouched, matching the original
// behavior). Otherwise the new subscription is appended and folded
// into any cached results it matches.
func (s *Sublist) Insert(sub *subscription) error {
	tokens, err := SubscribeTopicCheckAndSpilt(sub.topic)
	if err != nil {
		return err
	}
	s.Lock()
	// FIX: the replace-in-place paths below previously executed
	// `return nil` while still holding the lock, deadlocking every
	// later sublist operation. Defer guarantees release on all paths.
	defer s.Unlock()
	l := s.root
	var n *node
	for _, t := range tokens {
		n = l.nodes[t]
		if n == nil {
			n = newNode()
			l.nodes[t] = n
		}
		if n.next == nil {
			n.next = newLevel()
		}
		l = n.next
	}
	if sub.queue {
		// Replace an existing equal queue subscription in place.
		for i := range n.qsubs {
			if equal(n.qsubs[i], sub) {
				n.qsubs[i] = sub
				return nil
			}
		}
		n.qsubs = append(n.qsubs, sub)
	} else {
		// Replace an existing equal plain subscription in place.
		for i := range n.psubs {
			if equal(n.psubs[i], sub) {
				n.psubs[i] = sub
				return nil
			}
		}
		n.psubs = append(n.psubs, sub)
	}
	s.addToCache(string(sub.topic), sub)
	return nil
}
// addToCache folds a freshly inserted subscription into every
// cached Match result whose cached topic the subscription matches.
// Caller must hold the write lock.
func (s *Sublist) addToCache(topic string, sub *subscription) {
	for key, cached := range s.cache {
		if !matchLiteral(key, topic) {
			continue
		}
		// Copy-on-write: other goroutines may still hold a reference
		// to the old result.
		updated := copyResult(cached)
		if sub.queue {
			updated.qsubs = append(updated.qsubs, sub)
		} else {
			updated.psubs = append(updated.psubs, sub)
		}
		s.cache[key] = updated
	}
}
// removeFromCache drops every cached Match result the removed
// subscription could have contributed to. Since other goroutines
// may still reference the cached slices, the entries are deleted
// rather than edited; the next Match rebuilds them from the trie.
func (s *Sublist) removeFromCache(topic string, sub *subscription) {
	for key := range s.cache {
		if matchLiteral(key, topic) {
			delete(s.cache, key)
		}
	}
}
// matchLiteral reports whether the subscription topic matches the
// wildcard-free literal topic: "+" matches exactly one level, "#"
// matches all remaining levels.
//
// Fixes two defects in the original: li[i] was indexed without a
// bounds check (panic when the literal had fewer levels than the
// subscription), and a subscription that was a strict prefix of the
// literal (e.g. "a" vs "a/b") incorrectly matched.
func matchLiteral(literal, topic string) bool {
	tok, _ := SubscribeTopicCheckAndSpilt(topic)
	li, _ := PublishTopicCheckAndSpilt(literal)
	for i := 0; i < len(tok); i++ {
		switch tok[i] {
		case "#":
			return true
		case "+":
			// "+" must consume one existing literal level.
			if i >= len(li) {
				return false
			}
		default:
			if i >= len(li) || tok[i] != li[i] {
				return false
			}
		}
	}
	// All subscription tokens consumed; match only if the literal has
	// no extra levels left over.
	return len(tok) == len(li)
}
// copyResult returns a copy of r with freshly allocated slices (the
// *subscription elements themselves are shared), so the copy can be
// mutated without racing readers of the original.
func copyResult(r *SublistResult) *SublistResult {
	dup := new(SublistResult)
	dup.psubs = append(dup.psubs, r.psubs...)
	dup.qsubs = append(dup.qsubs, r.qsubs...)
	return dup
}
// Remove deletes sub from the sublist and invalidates any cached
// Match results that could contain it. Returns an error when the
// topic is invalid or the subscription is not present.
func (s *Sublist) Remove(sub *subscription) error {
	tokens, err := SubscribeTopicCheckAndSpilt(sub.topic)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()

	// Walk the trie along the subscription's tokens.
	lvl := s.root
	var n *node
	for _, tok := range tokens {
		if lvl == nil {
			return errors.New("No Matches subscription Found")
		}
		n = lvl.nodes[tok]
		if n == nil {
			lvl = nil
			continue
		}
		lvl = n.next
	}

	if !s.removeFromNode(n, sub) {
		return errors.New("No Matches subscription Found")
	}
	s.removeFromCache(string(sub.topic), sub)
	return nil
}
// removeFromNode removes sub from n's queue or plain subscription
// list, chosen by the subscription kind, and reports whether it was
// found. A nil node yields false.
//
// The original's trailing `return false` after an if/else in which
// both branches returned was unreachable dead code; it and the
// redundant else are removed.
func (s *Sublist) removeFromNode(n *node, sub *subscription) (found bool) {
	if n == nil {
		return false
	}
	if sub.queue {
		n.qsubs, found = removeSubFromList(sub, n.qsubs)
	} else {
		n.psubs, found = removeSubFromList(sub, n.psubs)
	}
	return found
}
// Match returns all subscriptions matching the published topic.
// Results are memoized per topic; when the cache exceeds 1024
// entries one arbitrary entry is evicted to bound memory.
// Returns nil when the topic fails validation.
func (s *Sublist) Match(topic string) *SublistResult {
	s.RLock()
	rc, ok := s.cache[topic]
	s.RUnlock()
	if ok {
		return rc
	}
	tokens, err := PublishTopicCheckAndSpilt(topic)
	if err != nil {
		log.Error("\tserver/sublist.go: ", zap.Error(err))
		return nil
	}
	result := &SublistResult{}
	// Write lock: the walk itself only reads, but the cache below is
	// mutated.
	s.Lock()
	l := s.root
	if len(tokens) > 0 {
		if tokens[0] == "/" {
			// Leading empty level: check root-level wildcards first.
			if _, exist := l.nodes["#"]; exist {
				addNodeToResults(l.nodes["#"], result)
			}
			// FIX: this branch previously descended through
			// l.nodes["/"] (copy-paste of the case below), which
			// panicked with a nil dereference whenever a "+"
			// subscription existed at the root but "/" did not.
			if _, exist := l.nodes["+"]; exist {
				matchLevel(l.nodes["+"].next, tokens[1:], result)
			}
			if _, exist := l.nodes["/"]; exist {
				matchLevel(l.nodes["/"].next, tokens[1:], result)
			}
		} else {
			matchLevel(s.root, tokens, result)
		}
	}
	s.cache[topic] = result
	if len(s.cache) > 1024 {
		// Evict one arbitrary map entry to keep the cache bounded.
		for k := range s.cache {
			delete(s.cache, k)
			break
		}
	}
	s.Unlock()
	return result
}
// matchLevel recursively collects into results the subscriptions
// matching toks, honoring "#" (multi-level) and "+" (single-level)
// wildcard nodes at each trie level.
func matchLevel(l *level, toks []string, results *SublistResult) {
	var swc, n *node
	exist := false
	for i, t := range toks {
		if l == nil {
			return
		}
		// A "#" node at any level matches the rest of the topic.
		if _, exist = l.nodes["#"]; exist {
			addNodeToResults(l.nodes["#"], results)
		}
		if t != "/" {
			// Remember the "+" node (swc) and also recurse past it.
			if swc, exist = l.nodes["+"]; exist {
				matchLevel(l.nodes["+"].next, toks[i+1:], results)
			}
		} else {
			if _, exist = l.nodes["+"]; exist {
				addNodeToResults(l.nodes["+"], results)
			}
		}
		n = l.nodes[t]
		if n != nil {
			l = n.next
		} else {
			l = nil
		}
	}
	if n != nil {
		addNodeToResults(n, results)
	}
	if swc != nil {
		// FIX: the original added n here (nil-pointer panic when no
		// exact node existed, double-add otherwise) instead of the
		// single-level-wildcard node matched at the last level.
		addNodeToResults(swc, results)
	}
}
// addNodeToResults appends one node's plain and queue subscriptions
// to the accumulated results.
func addNodeToResults(n *node, results *SublistResult) {
	results.psubs = append(results.psubs, n.psubs...)
	results.qsubs = append(results.qsubs, n.qsubs...)
}
// removeSubFromList removes sub (by pointer identity) from sl using
// swap-with-last, returning the possibly shrunk slice and whether
// the subscription was present.
func removeSubFromList(sub *subscription, sl []*subscription) ([]*subscription, bool) {
	for i, cur := range sl {
		if cur != sub {
			continue
		}
		last := len(sl) - 1
		sl[i] = sl[last]
		sl[last] = nil // clear the vacated slot so the GC can reclaim it
		return shrinkAsNeeded(sl[:last]), true
	}
	return sl, false
}
// shrinkAsNeeded reallocates sl when more than half of a
// non-trivial capacity is unused, so a burst of subscriptions
// followed by mass unsubscribes doesn't pin a large backing array.
func shrinkAsNeeded(sl []*subscription) []*subscription {
	c := cap(sl)
	// Small backing arrays aren't worth reallocating.
	if c <= 8 {
		return sl
	}
	if float32(c-len(sl))/float32(c) > 0.50 {
		return append([]*subscription(nil), sl...)
	}
	return sl
}

28
broker/worker.go Normal file
View File

@@ -0,0 +1,28 @@
package broker
import "sync"
// Work pairs a message to process with the sync.Pool the worker
// should be returned to once the message has been handled.
type Work struct {
	WorkerPool *sync.Pool
	Message *Message
}

// Worker processes Work items delivered on its channel from a
// dedicated goroutine.
type Worker struct {
	WorkerChannel chan Work
}

// NewWorker creates a Worker and starts its processing goroutine.
func NewWorker() Worker {
	w := Worker{WorkerChannel: make(chan Work)}
	return w.Start()
}

// Start launches a goroutine that processes incoming Work until
// WorkerChannel is closed, returning the worker to the work item's
// pool after each message.
// NOTE(review): the goroutine's only shutdown path is closing
// WorkerChannel — confirm callers eventually close it, otherwise
// the goroutine leaks.
func (w Worker) Start() Worker {
	go func() {
		for work := range w.WorkerChannel {
			ProcessMessage(work.Message)
			// put the worker back
			work.WorkerPool.Put(w)
		}
	}()
	return w
}

View File

@@ -1,4 +1,4 @@
## sub 1 , pub 2, pubsub 3 ## pub 1 , sub 2, pubsub 3
## %c is clientid , %s is username ## %c is clientid , %s is username
##auth type value pub/sub topic ##auth type value pub/sub topic
allow ip 127.0.0.1 2 $SYS/# allow ip 127.0.0.1 2 $SYS/#

View File

@@ -2,12 +2,10 @@
"workerNum": 4096, "workerNum": 4096,
"port": "1883", "port": "1883",
"host": "0.0.0.0", "host": "0.0.0.0",
"debug": true,
"cluster": { "cluster": {
"host": "0.0.0.0", "host": "0.0.0.0",
"port": "1993" "port": "1993"
}, },
"httpPort": "8080",
"router": "127.0.0.1:9888", "router": "127.0.0.1:9888",
"tlsPort": "8883", "tlsPort": "8883",
"tlsHost": "0.0.0.0", "tlsHost": "0.0.0.0",
@@ -20,8 +18,6 @@
"certFile": "ssl/server/cert.pem", "certFile": "ssl/server/cert.pem",
"keyFile": "ssl/server/key.pem" "keyFile": "ssl/server/key.pem"
}, },
"plugins": { "acl": false,
"auth": "authhttp", "aclConf": "conf/acl.conf"
"bridge": "kafka"
}
} }

View File

@@ -1,37 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: mqtt-broker
data:
hmq.config: |
{
"workerNum": 4096,
"port": "1883",
"host": "0.0.0.0",
"plugins": ["authhttp","kafka"]
}
kafka.json: |
{
"addr": [
"127.0.0.1:9090"
],
"onConnect": "onConnect",
"onPublish": "onPublish",
"onSubscribe": "onSubscribe",
"onDisconnect": "onDisconnect",
"onUnsubscribe": "onUnsubscribe",
"deliverMap": {
"#": "publish",
"/upload/+/#": "upload"
}
}
authhttp.json: |
{
"auth": "http://127.0.0.1:9090/mqtt/auth",
"acl": "http://127.0.0.1:9090/mqtt/acl",
"super": "http://127.0.0.1:9090/mqtt/superuser"
}

View File

@@ -1,44 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mqtt-broker
spec:
selector:
matchLabels:
app: mqtt-broker
replicas: 1
template:
metadata:
labels:
app: mqtt-broker
spec:
containers:
- name: mqtt-broker
image: uhub.service.ucloud.cn/uiot_core_hub/hmq:v0.1.0
ports:
- containerPort: 1883
- containerPort: 8080
volumeMounts:
- name: mqtt-broker
mountPath: /conf
subPath: hmq.config
- name: mqtt-broker
mountPath: /plugins/kafka/kafka.json
subPath: kafka.json
- name: mqtt-broker
mountPath: /plugins/authttp/http.json
subPath: kafka.json
volumes:
- name: mqtt-broker
configMap:
name: mqtt-broker
items:
- key: hmq.config
path: hmq.config
items:
- key: http.json
path: http.json
items:
- key: kafka.json
path: kafka.json

View File

@@ -1,13 +0,0 @@
kind: Service
apiVersion: v1
metadata:
name: mqtt-broker
spec:
selector:
app: mqtt-broker
ports:
- protocol: TCP
port: 1883
targetPort: 1883
type: ClusterIP
sessionAffinity: ClientIP

27
go.mod
View File

@@ -1,27 +0,0 @@
module github.com/fhmq/hmq
go 1.12
require (
github.com/Shopify/sarama v1.26.1
github.com/bitly/go-simplejson v0.5.0
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
github.com/eclipse/paho.mqtt.golang v1.2.0
github.com/gin-gonic/gin v1.4.0
github.com/golang/protobuf v1.3.2 // indirect
github.com/kr/pretty v0.1.0 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/pkg/errors v0.8.1 // indirect
github.com/satori/go.uuid v1.2.0
github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e
github.com/stretchr/testify v1.3.0
github.com/tidwall/gjson v1.3.0
go.uber.org/atomic v1.4.0 // indirect
go.uber.org/multierr v1.1.0 // indirect
go.uber.org/zap v1.10.0
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 // indirect
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80
golang.org/x/sys v0.0.0-20190730183949-1393eb018365 // indirect
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
)

119
go.sum
View File

@@ -1,119 +0,0 @@
github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 h1:2T/jmrHeTezcCM58lvEQXs0UpQJCo5SoGAcg+mbSTIg=
github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/Shopify/sarama v1.23.0 h1:slvlbm7bxyp7sKQbUwha5BQdZTqurhRoI+zbKorVigQ=
github.com/Shopify/sarama v1.23.0/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs=
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0=
github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3 h1:t8FVkw33L+wilf2QiWkw0UV77qRpcH/JHPKGpKa2E8g=
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
github.com/gin-gonic/gin v1.4.0 h1:3tMoCCfM7ppqsR0ptz/wi1impNpT7/9wQtMZ8lr1mCQ=
github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM=
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41 h1:GeinFsrjWz97fAxVUEd748aV0cYL+I6k44gFJTCVvpU=
github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e h1:uO75wNGioszjmIzcY/tvdDYKRLVvzggtAmmJkn9j4GQ=
github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e/go.mod h1:tm/wZFQ8e24NYaBGIlnO2WGCAi67re4HHuOm0sftE/M=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/tidwall/gjson v1.3.0 h1:kfpsw1W3trbg4Xm6doUtqSl9+LhLB6qJ9PkltVAQZYs=
github.com/tidwall/gjson v1.3.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c h1:uOCk1iQW6Vc18bnC13MfzScl+wdKBmM9Y9kU7Z83/lw=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190730183949-1393eb018365 h1:SaXEMXhWzMJThc05vu6uh61Q245r4KaWMrsTedk0FDc=
golang.org/x/sys v0.0.0-20190730183949-1393eb018365/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v8 v8.18.2 h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ=
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI=
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
gopkg.in/jcmturner/gokrb5.v7 v7.2.3 h1:hHMV/yKPwMnJhPuPx7pH2Uw/3Qyf+thJYlisUc44010=
gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View File

@@ -1,3 +1,5 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package acl package acl
import ( import (
@@ -5,13 +7,14 @@ import (
"errors" "errors"
"io" "io"
"os" "os"
"strconv"
"strings" "strings"
) )
const ( const (
SUB = "1" PUB = 1
PUB = "2" SUB = 2
PUBSUB = "3" PUBSUB = 3
CLIENTID = "clientid" CLIENTID = "clientid"
USERNAME = "username" USERNAME = "username"
IP = "ip" IP = "ip"
@@ -23,7 +26,7 @@ type AuthInfo struct {
Auth string Auth string
Typ string Typ string
Val string Val string
PubSub string PubSub int
Topics []string Topics []string
} }
@@ -33,6 +36,9 @@ type ACLConfig struct {
} }
func AclConfigLoad(file string) (*ACLConfig, error) { func AclConfigLoad(file string) (*ACLConfig, error) {
if file == "" {
file = "./conf/acl.conf"
}
aclconifg := &ACLConfig{ aclconifg := &ACLConfig{
File: file, File: file,
Info: make([]*AuthInfo, 0, 4), Info: make([]*AuthInfo, 0, 4),
@@ -75,16 +81,12 @@ func (c *ACLConfig) Prase() error {
parseErr = errors.New("\"" + line + "\" format is error") parseErr = errors.New("\"" + line + "\" format is error")
break break
} }
if tmpArr[3] != PUB && tmpArr[3] != SUB && tmpArr[3] != PUBSUB { var pubsub int
pubsub, err = strconv.Atoi(tmpArr[3])
if err != nil {
parseErr = errors.New("\"" + line + "\" format is error") parseErr = errors.New("\"" + line + "\" format is error")
break break
} }
// var pubsub int
// pubsub, err = strconv.Atoi(tmpArr[3])
// if err != nil {
// parseErr = errors.New("\"" + line + "\" format is error")
// break
// }
topicStr := strings.Replace(tmpArr[4], " ", "", -1) topicStr := strings.Replace(tmpArr[4], " ", "", -1)
topicStr = strings.Replace(topicStr, "\n", "", -1) topicStr = strings.Replace(topicStr, "\n", "", -1)
topics := strings.Split(topicStr, ",") topics := strings.Split(topicStr, ",")
@@ -93,7 +95,7 @@ func (c *ACLConfig) Prase() error {
Typ: tmpArr[1], Typ: tmpArr[1],
Val: tmpArr[2], Val: tmpArr[2],
Topics: topics, Topics: topics,
PubSub: tmpArr[3], PubSub: pubsub,
} }
c.Info = append(c.Info, tmpAuth) c.Info = append(c.Info, tmpAuth)
if err != nil { if err != nil {

View File

@@ -1,21 +1,22 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>*/
package acl package acl
import "strings" import "strings"
func checkTopicAuth(ACLInfo *ACLConfig, action, ip, username, clientid, topic string) bool { func CheckTopicAuth(ACLInfo *ACLConfig, typ int, ip, username, clientid, topic string) bool {
for _, info := range ACLInfo.Info { for _, info := range ACLInfo.Info {
ctyp := info.Typ ctyp := info.Typ
switch ctyp { switch ctyp {
case CLIENTID: case CLIENTID:
if match, auth := info.checkWithClientID(action, clientid, topic); match { if match, auth := info.checkWithClientID(typ, clientid, topic); match {
return auth return auth
} }
case USERNAME: case USERNAME:
if match, auth := info.checkWithUsername(action, username, topic); match { if match, auth := info.checkWithUsername(typ, username, topic); match {
return auth return auth
} }
case IP: case IP:
if match, auth := info.checkWithIP(action, ip, topic); match { if match, auth := info.checkWithIP(typ, ip, topic); match {
return auth return auth
} }
} }
@@ -23,18 +24,18 @@ func checkTopicAuth(ACLInfo *ACLConfig, action, ip, username, clientid, topic st
return false return false
} }
func (a *AuthInfo) checkWithClientID(action, clientid, topic string) (bool, bool) { func (a *AuthInfo) checkWithClientID(typ int, clientid, topic string) (bool, bool) {
auth := false auth := false
match := false match := false
if a.Val == "*" || a.Val == clientid { if a.Val == "*" || a.Val == clientid {
for _, tp := range a.Topics { for _, tp := range a.Topics {
des := strings.Replace(tp, "%c", clientid, -1) des := strings.Replace(tp, "%c", clientid, -1)
if action == PUB { if typ == PUB {
if pubTopicMatch(topic, des) { if pubTopicMatch(topic, des) {
match = true match = true
auth = a.checkAuth(PUB) auth = a.checkAuth(PUB)
} }
} else if action == SUB { } else if typ == SUB {
if subTopicMatch(topic, des) { if subTopicMatch(topic, des) {
match = true match = true
auth = a.checkAuth(SUB) auth = a.checkAuth(SUB)
@@ -45,18 +46,18 @@ func (a *AuthInfo) checkWithClientID(action, clientid, topic string) (bool, bool
return match, auth return match, auth
} }
func (a *AuthInfo) checkWithUsername(action, username, topic string) (bool, bool) { func (a *AuthInfo) checkWithUsername(typ int, username, topic string) (bool, bool) {
auth := false auth := false
match := false match := false
if a.Val == "*" || a.Val == username { if a.Val == "*" || a.Val == username {
for _, tp := range a.Topics { for _, tp := range a.Topics {
des := strings.Replace(tp, "%u", username, -1) des := strings.Replace(tp, "%u", username, -1)
if action == PUB { if typ == PUB {
if pubTopicMatch(topic, des) { if pubTopicMatch(topic, des) {
match = true match = true
auth = a.checkAuth(PUB) auth = a.checkAuth(PUB)
} }
} else if action == SUB { } else if typ == SUB {
if subTopicMatch(topic, des) { if subTopicMatch(topic, des) {
match = true match = true
auth = a.checkAuth(SUB) auth = a.checkAuth(SUB)
@@ -67,18 +68,18 @@ func (a *AuthInfo) checkWithUsername(action, username, topic string) (bool, bool
return match, auth return match, auth
} }
func (a *AuthInfo) checkWithIP(action, ip, topic string) (bool, bool) { func (a *AuthInfo) checkWithIP(typ int, ip, topic string) (bool, bool) {
auth := false auth := false
match := false match := false
if a.Val == "*" || a.Val == ip { if a.Val == "*" || a.Val == ip {
for _, tp := range a.Topics { for _, tp := range a.Topics {
des := tp des := tp
if action == PUB { if typ == PUB {
if pubTopicMatch(topic, des) { if pubTopicMatch(topic, des) {
auth = a.checkAuth(PUB) auth = a.checkAuth(PUB)
match = true match = true
} }
} else if action == SUB { } else if typ == SUB {
if subTopicMatch(topic, des) { if subTopicMatch(topic, des) {
auth = a.checkAuth(SUB) auth = a.checkAuth(SUB)
match = true match = true
@@ -89,15 +90,15 @@ func (a *AuthInfo) checkWithIP(action, ip, topic string) (bool, bool) {
return match, auth return match, auth
} }
func (a *AuthInfo) checkAuth(action string) bool { func (a *AuthInfo) checkAuth(typ int) bool {
auth := false auth := false
if action == PUB { if typ == PUB {
if a.Auth == ALLOW && (a.PubSub == PUB || a.PubSub == PUBSUB) { if a.Auth == ALLOW && (a.PubSub == PUB || a.PubSub == PUBSUB) {
auth = true auth = true
} else if a.Auth == DENY && a.PubSub == SUB { } else if a.Auth == DENY && a.PubSub == SUB {
auth = true auth = true
} }
} else if action == SUB { } else if typ == SUB {
if a.Auth == ALLOW && (a.PubSub == SUB || a.PubSub == PUBSUB) { if a.Auth == ALLOW && (a.PubSub == SUB || a.PubSub == PUBSUB) {
auth = true auth = true
} else if a.Auth == DENY && a.PubSub == PUB { } else if a.Auth == DENY && a.PubSub == PUB {

View File

@@ -1,3 +1,5 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package acl package acl
import ( import (

View File

@@ -5,60 +5,43 @@ package logger
import ( import (
"go.uber.org/zap" "go.uber.org/zap"
"go.uber.org/zap/zapcore"
) )
var ( var (
// env can be setup at build time with Go Linker. Value could be prod or whatever else for dev env logInstance *zap.Logger
instance *zap.Logger
logCfg zap.Config
encoderCfg = zap.NewProductionEncoderConfig()
) )
func init() { // InitDevLogger instanciate a logger for dev builds
encoderCfg.TimeKey = "timestamp" func InitDevLogger() {
encoderCfg.EncodeTime = zapcore.ISO8601TimeEncoder logCfg := zap.NewDevelopmentConfig()
logInstance, _ = logCfg.Build()
} }
// NewDevLogger return a logger for dev builds // InitProdLogger instanciate a logger for production builds
func NewDevLogger() (*zap.Logger, error) { func InitProdLogger() {
logCfg := zap.NewProductionConfig()
logCfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
// logCfg.DisableStacktrace = true
logCfg.EncoderConfig = encoderCfg
return logCfg.Build()
}
// NewProdLogger return a logger for production builds
func NewProdLogger() (*zap.Logger, error) {
logCfg := zap.NewProductionConfig() logCfg := zap.NewProductionConfig()
logCfg.DisableStacktrace = true logCfg.DisableStacktrace = true
logCfg.Level = zap.NewAtomicLevelAt(zap.InfoLevel) logCfg.Level = zap.NewAtomicLevelAt(zap.InfoLevel)
logCfg.EncoderConfig = encoderCfg logInstance, _ = logCfg.Build()
return logCfg.Build()
} }
func Prod() *zap.Logger { func InitLogger(debug bool) {
var err error
l, _ := NewProdLogger() if debug {
instance = l InitDevLogger()
} else {
return instance InitProdLogger()
}
func Debug() *zap.Logger {
l, _ := NewDevLogger()
instance = l
return instance
}
func Get() *zap.Logger {
if instance == nil {
l, _ := NewProdLogger()
instance = l
} }
if err != nil {
return instance panic("Unable to create a logger.")
}
logInstance.Debug("Logger initialization succeeded")
}
// Get the existing *zap.Logger instance. If none have been created, it'll instanciate de dev logger
func Get() *zap.Logger {
if logInstance == nil {
InitDevLogger()
}
return logInstance
} }

View File

@@ -1,12 +1,12 @@
/* /* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
Copyright (c) 2018, joy.zhou <chowyu08@gmail.com> */
*/
package logger package logger
import ( import (
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"go.uber.org/zap" "go.uber.org/zap"
) )
@@ -19,15 +19,11 @@ func TestGet(t *testing.T) {
} }
func TestNewDevLogger(t *testing.T) { func TestNewDevLogger(t *testing.T) {
logger, err := NewDevLogger() InitDevLogger()
assert.True(t, Get().Core().Enabled(zap.DebugLevel))
assert.Nil(t, err)
assert.True(t, logger.Core().Enabled(zap.DebugLevel))
} }
func TestNewProdLogger(t *testing.T) { func TestNewProdLogger(t *testing.T) {
logger, err := NewProdLogger() InitProdLogger()
assert.False(t, Get().Core().Enabled(zap.DebugLevel))
assert.Nil(t, err)
assert.False(t, logger.Core().Enabled(zap.DebugLevel))
} }

21
main.go
View File

@@ -1,29 +1,38 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
*/
package main package main
import ( import (
"log" "fmt"
"os" "os"
"os/signal" "os/signal"
"runtime" "runtime"
"github.com/fhmq/hmq/broker" "github.com/fhmq/hmq/broker"
"github.com/fhmq/hmq/logger"
) )
func main() { func main() {
runtime.GOMAXPROCS(runtime.NumCPU()) runtime.GOMAXPROCS(runtime.NumCPU())
config, err := broker.ConfigureConfig(os.Args[1:]) config, err := broker.ConfigureConfig(os.Args[1:])
if err != nil { if err != nil {
log.Fatal("configure broker config error: ", err) fmt.Println("configure broker config error: ", err)
return
} }
logger.InitLogger(config.Debug)
b, err := broker.NewBroker(config) b, err := broker.NewBroker(config, logger.Get())
if err != nil { if err != nil {
log.Fatal("New Broker error: ", err) fmt.Println("New Broker error: ", err)
return
} }
b.Start() b.Start()
s := waitForSignal() s := waitForSignal()
log.Println("signal received, broker closed.", s) fmt.Println("signal received, broker closed.", s)
} }
func waitForSignal() os.Signal { func waitForSignal() os.Signal {

View File

@@ -1,27 +0,0 @@
package auth
import (
authfile "github.com/fhmq/hmq/plugins/auth/authfile"
"github.com/fhmq/hmq/plugins/auth/authhttp"
)
const (
AuthHTTP = "authhttp"
AuthFile = "authfile"
)
type Auth interface {
CheckACL(action, clientID, username, ip, topic string) bool
CheckConnect(clientID, username, password string) bool
}
func NewAuth(name string) Auth {
switch name {
case AuthHTTP:
return authhttp.Init()
case AuthFile:
return authfile.Init()
default:
return &mockAuth{}
}
}

View File

@@ -1,54 +0,0 @@
## ACL Configure
```
Attention: Acl Type Change, change `pub =1, sub=2` to `sub =1, pub=2`
```
#### The ACL rules define:
~~~
Allow | type | value | pubsub | Topics
~~~
#### ACL Config
~~~
## type clientid , username, ipaddr
##sub 1 , pub 2, pubsub 3
## %c is clientid , %u is username
allow ip 127.0.0.1 2 $SYS/#
allow clientid 0001 3 #
allow username admin 3 #
allow username joy 3 /test,hello/world
allow clientid * 1 toCloud/%c
allow username * 1 toCloud/%u
deny clientid * 3 #
~~~
~~~
#allow local sub $SYS topic
allow ip 127.0.0.1 1 $SYS/#
~~~
~~~
#allow client who's id with 0001 or username with admin pub sub all topic
allow clientid 0001 3 #
allow username admin 3 #
~~~
~~~
#allow client with the username joy can pub sub topic '/test' and 'hello/world'
allow username joy 3 /test,hello/world
~~~
~~~
#allow all client pub the topic toCloud/{clientid/username}
allow clientid * 2 toCloud/%c
allow username * 2 toCloud/%u
~~~
~~~
#deny all client pub sub all topic
deny clientid * 3 #
~~~
Client match acl rule one by one
~~~
--------- --------- ---------
Client -> | Rule1 | --nomatch--> | Rule2 | --nomatch--> | Rule3 | -->
--------- --------- ---------
| | |
match match match
\|/ \|/ \|/
allow | deny allow | deny allow | deny
~~~

View File

@@ -1,23 +0,0 @@
package acl
type aclAuth struct {
config *ACLConfig
}
func Init() *aclAuth {
aclConfig, err := AclConfigLoad("./plugins/auth/authfile/acl.conf")
if err != nil {
panic(err)
}
return &aclAuth{
config: aclConfig,
}
}
func (a *aclAuth) CheckConnect(clientID, username, password string) bool {
return true
}
func (a *aclAuth) CheckACL(action, clientID, username, ip, topic string) bool {
return checkTopicAuth(a.config, action, username, ip, clientID, topic)
}

View File

@@ -1,179 +0,0 @@
package authhttp
import (
"encoding/json"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/fhmq/hmq/logger"
"go.uber.org/zap"
)
//Config device kafka config
type Config struct {
AuthURL string `json:"auth"`
ACLURL string `json:"acl"`
SuperURL string `json:"super"`
}
type authHTTP struct {
client *http.Client
}
var (
config Config
log = logger.Get().Named("authhttp")
httpClient *http.Client
)
//Init init kafak client
func Init() *authHTTP {
content, err := ioutil.ReadFile("./plugins/auth/authhttp/http.json")
if err != nil {
log.Fatal("Read config file error: ", zap.Error(err))
}
// log.Info(string(content))
err = json.Unmarshal(content, &config)
if err != nil {
log.Fatal("Unmarshal config file error: ", zap.Error(err))
}
// fmt.Println("http: config: ", config)
httpClient = &http.Client{
Transport: &http.Transport{
MaxConnsPerHost: 100,
MaxIdleConns: 100,
MaxIdleConnsPerHost: 100,
},
Timeout: time.Second * 100,
}
return &authHTTP{client: httpClient}
}
//CheckAuth check mqtt connect
func (a *authHTTP) CheckConnect(clientID, username, password string) bool {
action := "connect"
{
aCache := checkCache(action, clientID, username, password, "")
if aCache != nil {
if aCache.password == password && aCache.username == username && aCache.action == action {
return true
}
}
}
data := url.Values{}
data.Add("username", username)
data.Add("clientid", clientID)
data.Add("password", password)
req, err := http.NewRequest("POST", config.AuthURL, strings.NewReader(data.Encode()))
if err != nil {
log.Error("new request super: ", zap.Error(err))
return false
}
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
req.Header.Add("Content-Length", strconv.Itoa(len(data.Encode())))
resp, err := a.client.Do(req)
if err != nil {
log.Error("request super: ", zap.Error(err))
return false
}
defer resp.Body.Close()
io.Copy(ioutil.Discard, resp.Body)
if resp.StatusCode == http.StatusOK {
addCache(action, clientID, username, password, "")
return true
}
return false
}
// //CheckSuper check mqtt connect
// func CheckSuper(clientID, username, password string) bool {
// action := "connect"
// {
// aCache := checkCache(action, clientID, username, password, "")
// if aCache != nil {
// if aCache.password == password && aCache.username == username && aCache.action == action {
// return true
// }
// }
// }
// data := url.Values{}
// data.Add("username", username)
// data.Add("clientid", clientID)
// data.Add("password", password)
// req, err := http.NewRequest("POST", config.SuperURL, strings.NewReader(data.Encode()))
// if err != nil {
// log.Error("new request super: ", zap.Error(err))
// return false
// }
// req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
// req.Header.Add("Content-Length", strconv.Itoa(len(data.Encode())))
// resp, err := httpClient.Do(req)
// if err != nil {
// log.Error("request super: ", zap.Error(err))
// return false
// }
// defer resp.Body.Close()
// io.Copy(ioutil.Discard, resp.Body)
// if resp.StatusCode == http.StatusOK {
// return true
// }
// return false
// }
//CheckACL check mqtt connect
func (a *authHTTP) CheckACL(action, clientID, username, ip, topic string) bool {
{
aCache := checkCache(action, "", username, "", topic)
if aCache != nil {
if aCache.topic == topic && aCache.action == action {
return true
}
}
}
req, err := http.NewRequest("GET", config.ACLURL, nil)
if err != nil {
log.Error("get acl: ", zap.Error(err))
return false
}
data := req.URL.Query()
data.Add("username", username)
data.Add("topic", topic)
data.Add("access", action)
req.URL.RawQuery = data.Encode()
// fmt.Println("req:", req)
resp, err := a.client.Do(req)
if err != nil {
log.Error("request acl: ", zap.Error(err))
return false
}
defer resp.Body.Close()
io.Copy(ioutil.Discard, resp.Body)
if resp.StatusCode == http.StatusOK {
addCache(action, "", username, "", topic)
return true
}
return false
}

View File

@@ -1,32 +0,0 @@
package authhttp
import (
"time"
"github.com/patrickmn/go-cache"
)
type authCache struct {
action string
username string
clientID string
password string
topic string
}
var (
// cache = make(map[string]authCache)
c = cache.New(5*time.Minute, 10*time.Minute)
)
func checkCache(action, clientID, username, password, topic string) *authCache {
authc, found := c.Get(username)
if found {
return authc.(*authCache)
}
return nil
}
func addCache(action, clientID, username, password, topic string) {
c.Set(username, &authCache{action: action, username: username, clientID: clientID, password: password, topic: topic}, cache.DefaultExpiration)
}

View File

@@ -1,5 +0,0 @@
{
"auth": "http://127.0.0.1:9090/mqtt/auth",
"acl": "http://127.0.0.1:9090/mqtt/acl",
"super": "http://127.0.0.1:9090/mqtt/superuser"
}

View File

@@ -1,11 +0,0 @@
package auth
type mockAuth struct{}
func (m *mockAuth) CheckACL(action, clientID, username, ip, topic string) bool {
return true
}
func (m *mockAuth) CheckConnect(clientID, username, password string) bool {
return true
}

View File

@@ -1,49 +0,0 @@
package bridge
import "github.com/fhmq/hmq/logger"
const (
//Connect mqtt connect
Connect = "connect"
//Publish mqtt publish
Publish = "publish"
//Subscribe mqtt sub
Subscribe = "subscribe"
//Unsubscribe mqtt sub
Unsubscribe = "unsubscribe"
//Disconnect mqtt disconenct
Disconnect = "disconnect"
)
var (
log = logger.Get().Named("bridge")
)
//Elements kafka publish elements
type Elements struct {
ClientID string `json:"clientid"`
Username string `json:"username"`
Topic string `json:"topic"`
Payload string `json:"payload"`
Timestamp int64 `json:"ts"`
Size int32 `json:"size"`
Action string `json:"action"`
}
const (
//Kafka plugin name
Kafka = "kafka"
)
type BridgeMQ interface {
Publish(e *Elements) error
}
func NewBridgeMQ(name string) BridgeMQ {
switch name {
case Kafka:
return InitKafka()
default:
return &mockMQ{}
}
}

View File

@@ -1,156 +0,0 @@
package bridge
import (
"encoding/json"
"errors"
"io/ioutil"
"strings"
"time"
"github.com/Shopify/sarama"
"go.uber.org/zap"
)
type kafakConfig struct {
Addr []string `json:"addr"`
ConnectTopic string `json:"onConnect"`
SubscribeTopic string `json:"onSubscribe"`
PublishTopic string `json:"onPublish"`
UnsubscribeTopic string `json:"onUnsubscribe"`
DisconnectTopic string `json:"onDisconnect"`
DeliverMap map[string]string `json:"deliverMap"`
}
type kafka struct {
kafakConfig kafakConfig
kafkaClient sarama.AsyncProducer
}
//Init init kafak client
func InitKafka() *kafka {
log.Info("start connect kafka....")
content, err := ioutil.ReadFile("./plugins/kafka/kafka.json")
if err != nil {
log.Fatal("Read config file error: ", zap.Error(err))
}
// log.Info(string(content))
var config kafakConfig
err = json.Unmarshal(content, &config)
if err != nil {
log.Fatal("Unmarshal config file error: ", zap.Error(err))
}
c := &kafka{kafakConfig: config}
c.connect()
return c
}
//connect
func (k *kafka) connect() {
conf := sarama.NewConfig()
conf.Version = sarama.V1_1_1_0
kafkaClient, err := sarama.NewAsyncProducer(k.kafakConfig.Addr, conf)
if err != nil {
log.Fatal("create kafka async producer failed: ", zap.Error(err))
}
go func() {
for err := range kafkaClient.Errors() {
log.Error("send msg to kafka failed: ", zap.Error(err))
}
}()
k.kafkaClient = kafkaClient
}
//Publish publish to kafka
func (k *kafka) Publish(e *Elements) error {
config := k.kafakConfig
key := e.ClientID
topics := make(map[string]bool)
switch e.Action {
case Connect:
if config.ConnectTopic != "" {
topics[config.ConnectTopic] = true
}
case Publish:
if config.PublishTopic != "" {
topics[config.PublishTopic] = true
}
// foreach regexp map config
for reg, topic := range config.DeliverMap {
match := matchTopic(reg, e.Topic)
if match {
topics[topic] = true
}
}
case Subscribe:
if config.SubscribeTopic != "" {
topics[config.SubscribeTopic] = true
}
case Unsubscribe:
if config.UnsubscribeTopic != "" {
topics[config.UnsubscribeTopic] = true
}
case Disconnect:
if config.DisconnectTopic != "" {
topics[config.DisconnectTopic] = true
}
default:
return errors.New("error action: " + e.Action)
}
return k.publish(topics, key, e)
}
func (k *kafka) publish(topics map[string]bool, key string, msg *Elements) error {
payload, err := json.Marshal(msg)
if err != nil {
return err
}
for topic, _ := range topics {
select {
case k.kafkaClient.Input() <- &sarama.ProducerMessage{
Topic: topic,
Key: sarama.ByteEncoder(key),
Value: sarama.ByteEncoder(payload),
}:
continue
case <-time.After(5 * time.Second):
return errors.New("write kafka timeout")
}
}
return nil
}
func match(subTopic []string, topic []string) bool {
if len(subTopic) == 0 {
if len(topic) == 0 {
return true
}
return false
}
if len(topic) == 0 {
if subTopic[0] == "#" {
return true
}
return false
}
if subTopic[0] == "#" {
return true
}
if (subTopic[0] == "+") || (subTopic[0] == topic[0]) {
return match(subTopic[1:], topic[1:])
}
return false
}
func matchTopic(subTopic string, topic string) bool {
return match(strings.Split(subTopic, "/"), strings.Split(topic, "/"))
}

View File

@@ -1,14 +0,0 @@
{
"addr": [
"127.0.0.1:9090"
],
"onConnect": "onConnect",
"onPublish": "onPublish",
"onSubscribe": "onSubscribe",
"onDisconnect": "onDisconnect",
"onUnsubscribe": "onUnsubscribe",
"deliverMap": {
"#": "publish",
"/upload/+/#": "upload"
}
}

View File

@@ -1,7 +0,0 @@
package bridge
type mockMQ struct{}
func (m *mockMQ) Publish(e *Elements) error {
return nil
}

View File

@@ -1,58 +0,0 @@
package pool
import (
"github.com/segmentio/fasthash/fnv1a"
)
type WorkerPool struct {
maxWorkers int
taskQueue []chan func()
stoppedChan chan struct{}
}
func New(maxWorkers int) *WorkerPool {
// There must be at least one worker.
if maxWorkers < 1 {
maxWorkers = 1
}
// taskQueue is unbuffered since items are always removed immediately.
pool := &WorkerPool{
taskQueue: make([]chan func(), maxWorkers),
maxWorkers: maxWorkers,
stoppedChan: make(chan struct{}),
}
// Start the task dispatcher.
pool.dispatch()
return pool
}
func (p *WorkerPool) Submit(uid string, task func()) {
idx := fnv1a.HashString64(uid) % uint64(p.maxWorkers)
if task != nil {
p.taskQueue[idx] <- task
}
}
func (p *WorkerPool) dispatch() {
for i := 0; i < p.maxWorkers; i++ {
p.taskQueue[i] = make(chan func(), 1024)
go startWorker(p.taskQueue[i])
}
}
func startWorker(taskChan chan func()) {
go func() {
var task func()
var ok bool
for {
task, ok = <-taskChan
if !ok {
break
}
// Execute the task.
task()
}
}()
}

View File

@@ -1,166 +1,166 @@
package pool package pool
// import "time" import "time"
// const ( const (
// // This value is the size of the queue that workers register their // This value is the size of the queue that workers register their
// // availability to the dispatcher. There may be hundreds of workers, but // availability to the dispatcher. There may be hundreds of workers, but
// // only a small channel is needed to register some of the workers. // only a small channel is needed to register some of the workers.
// readyQueueSize = 64 readyQueueSize = 16
// // If worker pool receives no new work for this period of time, then stop // If worker pool receives no new work for this period of time, then stop
// // a worker goroutine. // a worker goroutine.
// idleTimeoutSec = 5 idleTimeoutSec = 5
// ) )
// type WorkerPool struct { type WorkerPool struct {
// maxWorkers int maxWorkers int
// timeout time.Duration timeout time.Duration
// taskQueue chan func() taskQueue chan func()
// readyWorkers chan chan func() readyWorkers chan chan func()
// stoppedChan chan struct{} stoppedChan chan struct{}
// } }
// func New(maxWorkers int) *WorkerPool { func New(maxWorkers int) *WorkerPool {
// // There must be at least one worker. // There must be at least one worker.
// if maxWorkers < 1 { if maxWorkers < 1 {
// maxWorkers = 1 maxWorkers = 1
// } }
// // taskQueue is unbuffered since items are always removed immediately. // taskQueue is unbuffered since items are always removed immediately.
// pool := &WorkerPool{ pool := &WorkerPool{
// taskQueue: make(chan func()), taskQueue: make(chan func()),
// maxWorkers: maxWorkers, maxWorkers: maxWorkers,
// readyWorkers: make(chan chan func(), readyQueueSize), readyWorkers: make(chan chan func(), readyQueueSize),
// timeout: time.Second * idleTimeoutSec, timeout: time.Second * idleTimeoutSec,
// stoppedChan: make(chan struct{}), stoppedChan: make(chan struct{}),
// } }
// // Start the task dispatcher. // Start the task dispatcher.
// go pool.dispatch() go pool.dispatch()
// return pool return pool
// } }
// func (p *WorkerPool) Stop() { func (p *WorkerPool) Stop() {
// if p.Stopped() { if p.Stopped() {
// return return
// } }
// close(p.taskQueue) close(p.taskQueue)
// <-p.stoppedChan <-p.stoppedChan
// } }
// func (p *WorkerPool) Stopped() bool { func (p *WorkerPool) Stopped() bool {
// select { select {
// case <-p.stoppedChan: case <-p.stoppedChan:
// return true return true
// default: default:
// } }
// return false return false
// } }
// func (p *WorkerPool) Submit(task func()) { func (p *WorkerPool) Submit(task func()) {
// if task != nil { if task != nil {
// p.taskQueue <- task p.taskQueue <- task
// } }
// } }
// func (p *WorkerPool) SubmitWait(task func()) { func (p *WorkerPool) SubmitWait(task func()) {
// if task == nil { if task == nil {
// return return
// } }
// doneChan := make(chan struct{}) doneChan := make(chan struct{})
// p.taskQueue <- func() { p.taskQueue <- func() {
// task() task()
// close(doneChan) close(doneChan)
// } }
// <-doneChan <-doneChan
// } }
// func (p *WorkerPool) dispatch() { func (p *WorkerPool) dispatch() {
// defer close(p.stoppedChan) defer close(p.stoppedChan)
// timeout := time.NewTimer(p.timeout) timeout := time.NewTimer(p.timeout)
// var workerCount int var workerCount int
// var task func() var task func()
// var ok bool var ok bool
// var workerTaskChan chan func() var workerTaskChan chan func()
// startReady := make(chan chan func()) startReady := make(chan chan func())
// Loop: Loop:
// for { for {
// timeout.Reset(p.timeout) timeout.Reset(p.timeout)
// select { select {
// case task, ok = <-p.taskQueue: case task, ok = <-p.taskQueue:
// if !ok { if !ok {
// break Loop break Loop
// } }
// // Got a task to do. // Got a task to do.
// select { select {
// case workerTaskChan = <-p.readyWorkers: case workerTaskChan = <-p.readyWorkers:
// // A worker is ready, so give task to worker. // A worker is ready, so give task to worker.
// workerTaskChan <- task workerTaskChan <- task
// default: default:
// // No workers ready. // No workers ready.
// // Create a new worker, if not at max. // Create a new worker, if not at max.
// if workerCount < p.maxWorkers { if workerCount < p.maxWorkers {
// workerCount++ workerCount++
// go func(t func()) { go func(t func()) {
// startWorker(startReady, p.readyWorkers) startWorker(startReady, p.readyWorkers)
// // Submit the task when the new worker. // Submit the task when the new worker.
// taskChan := <-startReady taskChan := <-startReady
// taskChan <- t taskChan <- t
// }(task) }(task)
// } else { } else {
// // Start a goroutine to submit the task when an existing // Start a goroutine to submit the task when an existing
// // worker is ready. // worker is ready.
// go func(t func()) { go func(t func()) {
// taskChan := <-p.readyWorkers taskChan := <-p.readyWorkers
// taskChan <- t taskChan <- t
// }(task) }(task)
// } }
// } }
// case <-timeout.C: case <-timeout.C:
// // Timed out waiting for work to arrive. Kill a ready worker. // Timed out waiting for work to arrive. Kill a ready worker.
// if workerCount > 0 { if workerCount > 0 {
// select { select {
// case workerTaskChan = <-p.readyWorkers: case workerTaskChan = <-p.readyWorkers:
// // A worker is ready, so kill. // A worker is ready, so kill.
// close(workerTaskChan) close(workerTaskChan)
// workerCount-- workerCount--
// default: default:
// // No work, but no ready workers. All workers are busy. // No work, but no ready workers. All workers are busy.
// } }
// } }
// } }
// } }
// // Stop all remaining workers as they become ready. // Stop all remaining workers as they become ready.
// for workerCount > 0 { for workerCount > 0 {
// workerTaskChan = <-p.readyWorkers workerTaskChan = <-p.readyWorkers
// close(workerTaskChan) close(workerTaskChan)
// workerCount-- workerCount--
// } }
// } }
// func startWorker(startReady, readyWorkers chan chan func()) { func startWorker(startReady, readyWorkers chan chan func()) {
// go func() { go func() {
// taskChan := make(chan func()) taskChan := make(chan func())
// var task func() var task func()
// var ok bool var ok bool
// // Register availability on starReady channel. // Register availability on starReady channel.
// startReady <- taskChan startReady <- taskChan
// for { for {
// // Read task from dispatcher. // Read task from dispatcher.
// task, ok = <-taskChan task, ok = <-taskChan
// if !ok { if !ok {
// // Dispatcher has told worker to stop. // Dispatcher has told worker to stop.
// break break
// } }
// // Execute the task. // Execute the task.
// task() task()
// // Register availability on readyWorkers channel. // Register availability on readyWorkers channel.
// readyWorkers <- taskChan readyWorkers <- taskChan
// } }
// }() }()
// } }