This commit is contained in:
zhouyuyan
2017-08-24 15:11:04 +08:00
parent e73776d670
commit 853ad54178
10 changed files with 929 additions and 133 deletions

View File

@@ -9,10 +9,17 @@ import (
)
// Broker routes MQTT packets between connected clients. It owns the
// subscription trie, the retained-message store, and the per-topic
// round-robin counters used for "$queue/" shared subscriptions.
type Broker struct {
	sl *Sublist    // subscription trie; matched against publish topics
	rl *RetainList // retained messages keyed by topic path
	// queues maps a queue-subscription topic to the index of the next
	// queue subscriber that should receive a message (round-robin).
	// NOTE(review): read/written without a lock in ProcessSubscribe and
	// ProcessPublishMessage — confirm single-goroutine access.
	queues map[string]int
}
// NewBroker builds a Broker with every field initialized, so handlers
// never observe a nil sublist, retain list, or queues map.
// FIX: the rendered diff left both the old bare `return &Broker{}` and
// the new initializing return in place; only the initializing form is kept.
func NewBroker() *Broker {
	return &Broker{
		sl:     NewSublist(),
		rl:     NewRetainList(),
		queues: make(map[string]int),
	}
}
func (b *Broker) StartListening() {
l, e := net.Listen("tcp", "0.0.0.0:1883")
@@ -56,6 +63,16 @@ func (b *Broker) handleConnection(conn net.Conn, idx int) {
log.Error(err)
return
}
connack := message.NewConnackMessage()
connack.SetReturnCode(message.ConnectionAccepted)
ack, _ := EncodeMessage(connack)
err1 := WriteBuffer(conn, ack)
if err1 != nil {
log.Error("send connack error, ", err1)
return
}
willmsg := message.NewPublishMessage()
if connMsg.WillFlag() {
willmsg.SetQoS(connMsg.WillQos())
@@ -80,10 +97,5 @@ func (b *Broker) handleConnection(conn net.Conn, idx int) {
info: info,
}
c.init()
c.woker = Worker{
WorkerPool: MyDispatcher,
MsgChannel: make(chan *Message),
quit: make(chan bool),
}
c.readLoop(idx)
}

View File

@@ -1,20 +1,30 @@
package broker
import (
"errors"
"fhmq/lib/message"
"net"
"strings"
"sync"
"github.com/prometheus/common/log"
)
// client represents one accepted MQTT connection and its broker-side state.
type client struct {
	mu       sync.Mutex // serializes writes to conn (see writeBuffer)
	broker   *Broker    // owning broker; nil-checked before use
	conn     net.Conn   // underlying transport; set to nil in Close
	info     info
	localIP  string // IP part of the local address, filled in by init()
	remoteIP string // IP part of the remote address, filled in by init()
	woker    Worker // NOTE(review): field name is a typo for "worker"
	subs     map[string]*subscription // this client's subscriptions, keyed by topic filter
}
// subscription ties one topic filter to the client that subscribed to it.
type subscription struct {
	client *client
	topic  []byte // topic filter; for queue subs the "$queue/" prefix is stripped
	qos    byte   // granted QoS level
	queue  bool   // true for "$queue/" shared-queue subscriptions
}
type info struct {
@@ -26,6 +36,7 @@ type info struct {
}
// init prepares per-connection state: the subscription map and the
// cached local/remote IP strings.
func (c *client) init() {
	c.subs = make(map[string]*subscription, 10)
	// NOTE(review): splitting on ":" breaks for IPv6 literals such as
	// "[::1]:1883" — net.SplitHostPort would be safer; confirm inputs.
	c.localIP = strings.Split(c.conn.LocalAddr().String(), ":")[0]
	c.remoteIP = strings.Split(c.conn.RemoteAddr().String(), ":")[0]
}
@@ -36,15 +47,316 @@ func (c *client) readLoop(idx int) {
if nc == nil || msgPool == nil {
return
}
msg := &Message{}
for {
buf, err := ReadPacket(nc)
if err != nil {
log.Error("read packet error: ", err)
c.Close()
return
}
msg.client = c
msg.buf = buf
msgPool.Push(msg)
msg.msg = buf
msgPool.queue <- msg
}
msgPool.Reduce()
}
// ProcessMessage reads the MQTT control-packet type from the fixed
// header of the raw packet and dispatches it to the matching handler
// on the originating client.
func ProcessMessage(msg *Message) {
	packet := msg.msg
	cli := msg.client
	if cli == nil || packet == nil {
		return
	}
	// The control-packet type lives in the high nibble of byte 0.
	switch uint8(packet[0]&0xF0) >> 4 {
	case CONNECT:
		cli.ProcessConnect(packet)
	case CONNACK:
		cli.ProcessConnAck(packet)
	case PUBLISH:
		cli.ProcessPublish(packet)
	case PUBACK:
		cli.ProcessPubAck(packet)
	case PUBREC:
		cli.ProcessPubREC(packet)
	case PUBREL:
		cli.ProcessPubREL(packet)
	case PUBCOMP:
		cli.ProcessPubComp(packet)
	case SUBSCRIBE:
		cli.ProcessSubscribe(packet)
	case UNSUBSCRIBE:
		cli.ProcessUnSubscribe(packet)
	case PINGREQ:
		cli.ProcessPing(packet)
	case DISCONNECT:
		cli.Close()
	case SUBACK, UNSUBACK, PINGRESP:
		// Pure acknowledgements; nothing for the broker to do.
	default:
		log.Info("Recv Unknow message.......")
	}
}
// ProcessConnect handles an inbound CONNECT packet.
// NOTE(review): stub — the CONNECT handshake appears to be performed
// elsewhere before the read loop starts.
func (c *client) ProcessConnect(buf []byte) {
}
// ProcessConnAck handles an inbound CONNACK packet.
// NOTE(review): stub — intentionally empty for now.
func (c *client) ProcessConnAck(buf []byte) {
}
// ProcessPublish decodes a PUBLISH packet and hands it, together with
// the raw bytes, to the subscription-matching path.
func (c *client) ProcessPublish(buf []byte) {
	pub, err := DecodePublishMessage(buf)
	if err != nil {
		log.Error("Decode Publish Message error: ", err)
		return
	}
	c.ProcessPublishMessage(buf, pub)
}
// ProcessPublishMessage fans a raw PUBLISH packet out to every matching
// plain subscriber and to at most one queue subscriber (round-robin).
func (c *client) ProcessPublishMessage(buf []byte, msg *message.PublishMessage) {
	b := c.broker
	if b == nil {
		return
	}
	topic := string(msg.Topic())
	r := b.sl.Match(topic)
	// log.Info("psubs num: ", len(r.psubs))
	if len(r.qsubs) == 0 && len(r.psubs) == 0 {
		return
	}
	// Plain subscriptions: every matching subscriber receives the packet.
	for _, sub := range r.psubs {
		// if sub.client.typ == ROUTER {
		// if typ == ROUTER {
		// continue
		// }
		// }
		if sub != nil {
			err := sub.client.writeBuffer(buf)
			if err != nil {
				log.Error("process message for psub error, ", err)
			}
		}
	}
	// Queue subscriptions: deliver to the subscriber whose position
	// matches the stored round-robin counter, then advance the counter.
	// NOTE(review): the counter is looked up by string(sub.topic) but
	// advanced under the publish topic, while ProcessSubscribe stores it
	// under the "$queue/"-prefixed filter — these three keys likely never
	// agree. Verify which key is intended.
	for i, sub := range r.qsubs {
		// if sub.client.typ == ROUTER {
		// if typ == ROUTER {
		// continue
		// }
		// }
		// s.qmu.Lock()
		if cnt, exist := b.queues[string(sub.topic)]; exist && i == cnt {
			if sub != nil {
				err := sub.client.writeBuffer(buf)
				if err != nil {
					log.Error("process will message for qsub error, ", err)
				}
			}
			b.queues[topic] = (b.queues[topic] + 1) % len(r.qsubs)
			break
		}
		// s.qmu.Unlock()
	}
}
// ProcessPubAck handles PUBACK. Stub — QoS 1/2 flows are not implemented.
func (c *client) ProcessPubAck(buf []byte) {
}

// ProcessPubREC handles PUBREC. Stub.
func (c *client) ProcessPubREC(buf []byte) {
}

// ProcessPubREL handles PUBREL. Stub.
func (c *client) ProcessPubREL(buf []byte) {
}

// ProcessPubComp handles PUBCOMP. Stub.
func (c *client) ProcessPubComp(buf []byte) {
}
// ProcessSubscribe handles a SUBSCRIBE packet: it registers each topic
// filter in the broker's sublist (filters beginning with "$queue/" become
// shared-queue subscriptions) and replies with a SUBACK carrying exactly
// one return code per requested topic.
func (c *client) ProcessSubscribe(buf []byte) {
	srv := c.broker
	if srv == nil {
		return
	}
	msg, err := DecodeSubscribeMessage(buf)
	if err != nil {
		log.Error("Decode Subscribe Message error: ", err)
		c.Close()
		return
	}
	topics := msg.Topics()
	qos := msg.Qos()

	suback := message.NewSubackMessage()
	suback.SetPacketId(msg.PacketId())
	var retcodes []byte

	for i, t := range topics {
		topic := string(t)
		//check topic auth for client
		// if !c.CheckTopicAuth(topic, SUB) {
		// log.Error("CheckSubAuth failed")
		// retcodes = append(retcodes, message.QosFailure)
		// continue
		// }
		if _, exist := c.subs[topic]; exist {
			// Already subscribed: only the granted QoS may change.
			c.subs[topic].qos = qos[i]
			retcodes = append(retcodes, qos[i])
			continue
		}
		queue := false
		if strings.HasPrefix(topic, "$queue/") {
			if len(t) <= 7 {
				// "$queue/" with an empty remainder is not a valid filter.
				retcodes = append(retcodes, message.QosFailure)
				continue
			}
			t = t[7:]
			queue = true
			// Initialize the round-robin counter for this queue topic.
			// NOTE: keyed by the full "$queue/"-prefixed filter, matching
			// the original behavior.
			// srv.qmu.Lock()
			if _, exists := srv.queues[topic]; !exists {
				srv.queues[topic] = 0
			}
			// srv.qmu.Unlock()
		}
		sub := &subscription{
			topic:  t,
			qos:    qos[i],
			client: c,
			queue:  queue,
		}
		// c.mu.Lock()
		c.subs[topic] = sub
		// c.mu.Unlock()
		if err := srv.sl.Insert(sub); err != nil {
			log.Error("Insert subscription error: ", err)
			// BUG FIX: the original appended QosFailure and then fell
			// through to append qos[i] as well, desynchronizing the SUBACK
			// return codes from the requested topics. Record exactly one
			// failure code and roll back the local registration.
			delete(c.subs, topic)
			retcodes = append(retcodes, message.QosFailure)
			continue
		}
		retcodes = append(retcodes, qos[i])
	}

	if err := suback.AddReturnCodes(retcodes); err != nil {
		log.Error("add return suback code error, ", err)
		// if typ == CLIENT {
		c.Close()
		// }
		return
	}
	if err := c.writeMessage(suback); err != nil {
		log.Error("send suback error, ", err)
		return
	}
	//broadcast subscribe message
	// if typ == CLIENT {
	// srv.startGoRoutine(func() {
	// srv.BroadcastSubscribeMessage(buf)
	// })
	// }
	//process retain message
	// for _, t := range topics {
	// srv.startGoRoutine(func() {
	// bufs := srv.rl.Match(t)
	// for _, buf := range bufs {
	// log.Info("process retain message: ", string(buf))
	// if buf != nil && string(buf) != "" {
	// c.writeBuffer(buf)
	// }
	// }
	// })
	// }
}
// ProcessUnSubscribe handles an UNSUBSCRIBE packet.
// NOTE(review): stub — subscriptions are currently only removed in Close().
func (c *client) ProcessUnSubscribe(buf []byte) {
}
// ProcessPing validates a PINGREQ packet and answers with a PINGRESP.
// A malformed packet closes the connection.
func (c *client) ProcessPing(buf []byte) {
	if _, err := DecodePingreqMessage(buf); err != nil {
		log.Error("Decode PingRequest Message error: ", err)
		c.Close()
		return
	}
	if err := c.writeMessage(message.NewPingrespMessage()); err != nil {
		log.Error("send PingResponse error, ", err)
	}
}
// Close tears down the client: it unregisters every subscription from
// the broker's sublist and closes the network connection.
func (c *client) Close() {
	srv := c.broker
	subs := c.subs
	if srv != nil {
		// srv.removeClient(c)
		for _, sub := range subs {
			// log.Info("remove Sub")
			err := srv.sl.Remove(sub)
			if err != nil {
				log.Error("closed client but remove sublist error, ", err)
			}
			// if c.typ == CLIENT {
			// srv.BroadcastUnSubscribe(sub)
			// }
		}
	}
	// NOTE(review): conn is nilled out here without holding c.mu while
	// writeBuffer may use it concurrently — confirm callers serialize Close.
	if c.conn != nil {
		c.conn.Close()
		c.conn = nil
	}
}
// WriteBuffer writes buf to conn in a single Write call.
// It returns an error when conn is nil or when the write itself fails.
func WriteBuffer(conn net.Conn, buf []byte) error {
	if conn == nil {
		// FIX: error text previously read "conn is nul" (typo).
		return errors.New("conn is nil")
	}
	_, err := conn.Write(buf)
	return err
}
// writeBuffer serializes writes to the connection with the client mutex,
// so concurrent publishers cannot interleave packet bytes.
func (c *client) writeBuffer(buf []byte) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	return WriteBuffer(c.conn, buf)
}
// writeMessage encodes msg and sends the resulting packet to the peer.
func (c *client) writeMessage(msg message.Message) error {
	encoded, err := EncodeMessage(msg)
	if err != nil {
		return err
	}
	return c.writeBuffer(encoded)
}

130
broker/comm.go Normal file
View File

@@ -0,0 +1,130 @@
package broker
import (
"bytes"
"errors"
"reflect"
"strings"
"time"
)
// Timing constants for accept/retry loops.
const (
	// ACCEPT_MIN_SLEEP is the minimum acceptable sleep times on temporary errors.
	ACCEPT_MIN_SLEEP = 100 * time.Millisecond
	// ACCEPT_MAX_SLEEP is the maximum acceptable sleep times on temporary errors.
	ACCEPT_MAX_SLEEP = 10 * time.Second
	// DEFAULT_ROUTE_CONNECT is the route solicitation interval.
	DEFAULT_ROUTE_CONNECT = 5 * time.Second
	// DEFAULT_TLS_TIMEOUT bounds the TLS handshake.
	DEFAULT_TLS_TIMEOUT = 5 * time.Second
)

// MQTT control-packet types as encoded in the high nibble of the fixed
// header's first byte; values start at 1 (0 is reserved).
const (
	CONNECT = uint8(iota + 1)
	CONNACK
	PUBLISH
	PUBACK
	PUBREC
	PUBREL
	PUBCOMP
	SUBSCRIBE
	SUBACK
	UNSUBSCRIBE
	UNSUBACK
	PINGREQ
	PINGRESP
	DISCONNECT
)
// SubscribeTopicCheckAndSpilt validates a subscription topic filter and
// splits it into its "/"-separated tokens. "#" is only legal as the very
// last byte, "+" must stand alone in a middle token, and empty middle
// tokens ("//") are rejected. Leading/trailing empty tokens are replaced
// by the literal "/" token.
func SubscribeTopicCheckAndSpilt(subject []byte) ([]string, error) {
	if idx := bytes.IndexByte(subject, '#'); idx != -1 && idx != len(subject)-1 {
		return nil, errors.New("Topic format error with index of #")
	}
	tokens := strings.Split(string(subject), "/")
	last := len(tokens) - 1
	for i, tok := range tokens {
		if i == 0 || i == last {
			// Edge tokens: an empty segment stands for the separator itself.
			if tok == "" {
				tokens[i] = "/"
			}
			continue
		}
		if tok == "" {
			return nil, errors.New("Topic format error with index of //")
		}
		if tok != "+" && strings.Contains(tok, "+") {
			return nil, errors.New("Topic format error with index of +")
		}
	}
	return tokens, nil
}
// PublishTopicCheckAndSpilt validates a publish topic (which may contain
// no wildcards at all) and splits it into its "/"-separated tokens.
// Empty middle tokens ("//") are rejected; empty edge tokens are replaced
// by the literal "/" token.
func PublishTopicCheckAndSpilt(subject []byte) ([]string, error) {
	if bytes.ContainsAny(subject, "#+") {
		return nil, errors.New("Publish Topic format error with + and #")
	}
	tokens := strings.Split(string(subject), "/")
	last := len(tokens) - 1
	for i, tok := range tokens {
		if tok != "" {
			continue
		}
		if i == 0 || i == last {
			tokens[i] = "/"
		} else {
			return nil, errors.New("Topic format error with index of //")
		}
	}
	return tokens, nil
}
// equal reports whether two interface values have the same dynamic type
// and equal values. Function values are never considered equal.
//
// FIX: the original compared &k1 == &k2 for Func kinds — the addresses of
// two distinct parameters, which is always false — and followed the
// k1 == k2 check with a type switch that could never change the outcome
// (the types were already proven identical, so each typed comparison
// repeated the interface comparison). Behavior is unchanged; the dead
// code is removed. Note: like the original, this panics if both values
// share a non-comparable, non-func type (e.g. slices).
func equal(k1, k2 interface{}) bool {
	if reflect.TypeOf(k1) != reflect.TypeOf(k2) {
		return false
	}
	if reflect.ValueOf(k1).Kind() == reflect.Func {
		// Funcs are not comparable; preserve the original's always-false result.
		return false
	}
	return k1 == k2
}

View File

@@ -1,31 +0,0 @@
package broker
import "time"
const (
// ACCEPT_MIN_SLEEP is the minimum acceptable sleep times on temporary errors.
ACCEPT_MIN_SLEEP = 100 * time.Millisecond
// ACCEPT_MAX_SLEEP is the maximum acceptable sleep times on temporary errors
ACCEPT_MAX_SLEEP = 10 * time.Second
// DEFAULT_ROUTE_CONNECT Route solicitation intervals.
DEFAULT_ROUTE_CONNECT = 5 * time.Second
// DEFAULT_TLS_TIMEOUT
DEFAULT_TLS_TIMEOUT = 5 * time.Second
)
const (
CONNECT = uint8(iota + 1)
CONNACK
PUBLISH
PUBACK
PUBREC
PUBREL
PUBCOMP
SUBSCRIBE
SUBACK
UNSUBSCRIBE
UNSUBACK
PINGREQ
PINGRESP
DISCONNECT
)

View File

@@ -1,19 +1,11 @@
package broker
const (
WorkPoolNum = 1024
WorkNum = 2048
MaxUser = 1024 * 1024
MessagePoolNum = 1024
MessagePoolUser = MaxUser / MessagePoolNum
MessagePoolMessageNum = MaxUser / MessagePoolNum * 4
// MessageBoxNum = 256
// MessageBoxUserNum = MaxUser / MessageBoxNum
// MessageBoxMessageLength = MessageBoxUserNum
)
var (
MyDispatcher Dispatcher
)
type Dispatcher struct {
@@ -21,8 +13,23 @@ type Dispatcher struct {
}
// init wires up the global dispatch pipeline at package load time.
// NOTE(review): this body looks like merge residue — it both fills the
// package-level workerPool/MyDispatcher (old scheme) and builds a fresh
// local dispatcher via NewDispatcher (new scheme). The local dispatcher
// is the one that runs; confirm whether the first two lines are still needed.
func init() {
	workerPool = make(chan chan *Message, WorkPoolNum)
	MyDispatcher = &Dispatcher{WorkerPool: workerPool}
	InitMessagePool()
	dispatcher := NewDispatcher()
	dispatcher.Run()
}
// Run launches WorkNum workers against the dispatcher's pool and then
// starts the dispatch loop in its own goroutine.
func (d *Dispatcher) Run() {
	for i := 0; i < WorkNum; i++ {
		NewWorker(d.WorkerPool).Start()
	}
	go d.dispatch()
}
// NewDispatcher builds a Dispatcher whose worker-channel pool holds up
// to WorkNum registrations.
func NewDispatcher() *Dispatcher {
	return &Dispatcher{WorkerPool: make(chan chan *Message, WorkNum)}
}
func (d *Dispatcher) dispatch() {
@@ -30,7 +37,7 @@ func (d *Dispatcher) dispatch() {
go func(idx int) {
for {
select {
case msg := <-MSGPool[idx].Pop():
case msg := <-MSGPool[idx].queue:
go func(msg *Message) {
msgChannel := <-d.WorkerPool
msgChannel <- msg

View File

@@ -5,7 +5,6 @@ import "sync"
// Message couples one raw MQTT packet with the client it arrived from;
// it is the unit of work handed through the dispatcher/worker pipeline.
type Message struct {
	client *client // connection the packet was read from
	msg    []byte  // raw packet bytes
	// pool *MessagePool
}
var (
@@ -19,12 +18,11 @@ type MessagePool struct {
queue chan *Message
}
func init() []MessagePool {
MSGPool = make([]MessagePool, (MessagePoolNum + 2))
for i := 0; i < (MessagePoolNum + 2); i++ {
func InitMessagePool() {
MSGPool = make([]MessagePool, MessagePoolNum)
for i := 0; i < MessagePoolNum; i++ {
MSGPool[i].Init(MessagePoolUser, MessagePoolMessageNum)
}
return MSGPool
}
func (p *MessagePool) Init(num int, maxusernum int) {
@@ -51,14 +49,3 @@ func (p *MessagePool) Reduce() {
p.l.Unlock()
}
// Pop blocks until a message is available on the pool's queue and returns it.
func (p *MessagePool) Pop() *Message {
	p2 := <-p.queue
	return p2
}

// Push enqueues a message, blocking while the pool's queue is full.
func (p *MessagePool) Push(pmessage *Message) {
	p.queue <- pmessage
}

118
broker/retain.go Normal file
View File

@@ -0,0 +1,118 @@
package broker
import "sync"
// RetainList stores retained messages in a topic trie so they can be
// replayed to subscribers that arrive after the publish.
type RetainList struct {
	sync.RWMutex
	root *rlevel
}

// rlevel is one level of the retain trie: the nodes reachable via the
// next topic token.
type rlevel struct {
	nodes map[string]*rnode
}

// rnode holds the retained message for one topic path plus the next level.
type rnode struct {
	next *rlevel
	msg  []byte
}

// RetainResult accumulates matched retained messages during a lookup.
type RetainResult struct {
	msg [][]byte
}

// newRNode creates an empty retain node.
func newRNode() *rnode {
	return &rnode{msg: make([]byte, 0, 4)}
}

// newRLevel creates an empty retain-trie level.
func newRLevel() *rlevel {
	return &rlevel{nodes: make(map[string]*rnode)}
}

// NewRetainList creates an empty RetainList ready for use.
func NewRetainList() *RetainList {
	return &RetainList{root: newRLevel()}
}
// Insert stores buf as the retained message for the given literal topic,
// creating trie nodes along the token path as needed.
func (r *RetainList) Insert(topic, buf []byte) error {
	tokens, err := PublishTopicCheckAndSpilt(topic)
	if err != nil {
		return err
	}
	r.Lock()
	defer r.Unlock()
	lvl := r.root
	var rn *rnode
	for _, tok := range tokens {
		rn = lvl.nodes[tok]
		if rn == nil {
			rn = newRNode()
			lvl.nodes[tok] = rn
		}
		if rn.next == nil {
			rn.next = newRLevel()
		}
		lvl = rn.next
	}
	// rn now points at the terminal node for the full topic path.
	rn.msg = buf
	return nil
}
// Match returns the retained messages stored under topics matched by the
// given subscription filter (which may contain "+" and "#").
func (r *RetainList) Match(topic []byte) [][]byte {
	tokens, err := SubscribeTopicCheckAndSpilt(topic)
	if err != nil {
		return nil
	}
	results := &RetainResult{}
	// The traversal only reads the trie, so a read lock suffices; the
	// original took the exclusive write lock and serialized all matches.
	r.RLock()
	matchRLevel(r.root, tokens, results)
	r.RUnlock()
	// log.Info("results: ", results)
	return results.msg
}
// matchRLevel walks the retain trie, collecting retained messages under
// l that are matched by the token list toks.
func matchRLevel(l *rlevel, toks []string, results *RetainResult) {
	var n *rnode
	for i, t := range toks {
		if l == nil {
			return
		}
		// log.Info("l info :", l.nodes)
		// "#": collect every retained message at and below this level.
		// NOTE(review): execution falls through to the literal lookup
		// below after handling "#" — confirm the possible duplicate
		// collection is intended.
		if t == "#" {
			for _, n := range l.nodes {
				n.GetAll(results)
			}
		}
		// "+": any single token matches at this level.
		if t == "+" {
			for _, n := range l.nodes {
				// NOTE(review): t[i+1:] slices the token STRING "+" by the
				// token index — len(toks[i+1:]) (remaining tokens) was
				// almost certainly intended here. Verify before changing.
				if len(t[i+1:]) == 0 {
					results.msg = append(results.msg, n.msg)
				} else {
					matchRLevel(n.next, toks[i+1:], results)
				}
			}
		}
		n = l.nodes[t]
		if n != nil {
			l = n.next
		} else {
			l = nil
		}
	}
	// Terminal node reached by the literal path: emit its message.
	if n != nil {
		results.msg = append(results.msg, n.msg)
	}
}
// GetAll appends this node's retained message (if any) and then recurses
// into every child node, collecting the whole subtree.
func (r *rnode) GetAll(results *RetainResult) {
	// log.Info("node 's message: ", string(r.msg))
	if r.msg != nil && string(r.msg) != "" {
		results.msg = append(results.msg, r.msg)
	}
	l := r.next
	// FIX: guard against a node without a child level; the original
	// dereferenced l.nodes unconditionally and would panic on a nil next.
	if l == nil {
		return
	}
	for _, n := range l.nodes {
		n.GetAll(results)
	}
}

318
broker/sublist.go Normal file
View File

@@ -0,0 +1,318 @@
package broker
import (
"errors"
"sync"
log "github.com/cihub/seelog"
)
// SublistResult is a match result structure better optimized for queue subs.
type SublistResult struct {
	psubs []*subscription // plain subscriptions: all receive each match
	qsubs []*subscription // don't make this a map, too expensive to iterate
}

// A Sublist stores and efficiently retrieves subscriptions.
type Sublist struct {
	sync.RWMutex
	cache map[string]*SublistResult // topic -> memoized match result
	root  *level                    // root of the token trie
}

// A node contains subscriptions and a pointer to the next level.
type node struct {
	next  *level
	psubs []*subscription
	qsubs []*subscription
}

// A level represents a group of nodes keyed by the next topic token
// (including the wildcard tokens "+" and "#").
type level struct {
	nodes map[string]*node
}

// newNode creates a new default node.
func newNode() *node {
	return &node{psubs: make([]*subscription, 0, 4), qsubs: make([]*subscription, 0, 4)}
}

// newLevel creates a new default level.
// NOTE(review): the original comment mentioned FNV1A hashing of tokens;
// the plain map here does not use it.
func newLevel() *level {
	return &level{nodes: make(map[string]*node)}
}

// NewSublist creates a default (empty) sublist.
func NewSublist() *Sublist {
	return &Sublist{root: newLevel(), cache: make(map[string]*SublistResult)}
}
// Insert adds a subscription into the sublist, creating trie nodes along
// the filter's token path as needed. A subscription equal to an existing
// one (see equal) replaces it in place.
//
// BUG FIX: the original returned early on the replace-in-place paths
// while still holding the write lock, deadlocking every later operation
// on the sublist; the deferred Unlock now covers every return path.
func (s *Sublist) Insert(sub *subscription) error {
	tokens, err := SubscribeTopicCheckAndSpilt(sub.topic)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	l := s.root
	var n *node
	for _, t := range tokens {
		n = l.nodes[t]
		if n == nil {
			n = newNode()
			l.nodes[t] = n
		}
		if n.next == nil {
			n.next = newLevel()
		}
		l = n.next
	}
	if sub.queue {
		// Replace an equal queue subscription in place.
		for i := range n.qsubs {
			if equal(n.qsubs[i], sub) {
				n.qsubs[i] = sub
				return nil
			}
		}
		n.qsubs = append(n.qsubs, sub)
	} else {
		// Replace an equal plain subscription in place.
		for i := range n.psubs {
			if equal(n.psubs[i], sub) {
				n.psubs[i] = sub
				return nil
			}
		}
		n.psubs = append(n.psubs, sub)
	}
	s.addToCache(string(sub.topic), sub)
	return nil
}
// addToCache folds a freshly inserted subscription into every cached
// result whose cached topic is matched by the subscription's filter.
func (s *Sublist) addToCache(topic string, sub *subscription) {
	for cachedTopic, res := range s.cache {
		if !matchLiteral(cachedTopic, topic) {
			continue
		}
		// Copy-on-write: other goroutines may still hold the old result.
		updated := copyResult(res)
		if sub.queue {
			updated.qsubs = append(updated.qsubs, sub)
		} else {
			updated.psubs = append(updated.psubs, sub)
		}
		s.cache[cachedTopic] = updated
	}
}
// removeFromCache invalidates every cached result affected by a removed
// subscription; subsequent lookups rebuild the entries on demand.
func (s *Sublist) removeFromCache(topic string, sub *subscription) {
	for k := range s.cache {
		if matchLiteral(k, topic) {
			// Others may still reference the cached slices, so drop the
			// entry rather than mutating it.
			delete(s.cache, k)
		}
	}
}
// matchLiteral reports whether the wildcard-free topic `literal` is
// matched by the subscription filter `topic`.
//
// FIX: the original discarded both split errors (treating an invalid
// input as a universal match) and indexed li[i] unchecked, panicking
// whenever the filter had more tokens than the literal.
func matchLiteral(literal, topic string) bool {
	tok, err := SubscribeTopicCheckAndSpilt([]byte(topic))
	if err != nil {
		return false
	}
	li, err := PublishTopicCheckAndSpilt([]byte(literal))
	if err != nil {
		return false
	}
	for i := 0; i < len(tok); i++ {
		// A filter longer than the literal cannot match it.
		if i >= len(li) {
			return false
		}
		b := tok[i]
		switch b {
		case "+":
			// single-level wildcard: any one token matches
		case "#":
			return true
		default:
			if b != li[i] {
				return false
			}
		}
	}
	return true
}
// copyResult duplicates a result's subscription slices so the copy can
// be mutated without disturbing holders of the original (the pointed-to
// subscriptions themselves are shared).
func copyResult(r *SublistResult) *SublistResult {
	dup := &SublistResult{}
	dup.psubs = append(dup.psubs, r.psubs...)
	dup.qsubs = append(dup.qsubs, r.qsubs...)
	return dup
}
// Remove deletes a subscription from the sublist and invalidates any
// cached results it affected. Removal matches by pointer identity
// (see removeSubFromList).
func (s *Sublist) Remove(sub *subscription) error {
	tokens, err := SubscribeTopicCheckAndSpilt(sub.topic)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()
	l := s.root
	var n *node
	// Walk the trie along the filter's tokens; a dead end means the
	// subscription was never inserted.
	for _, t := range tokens {
		if l == nil {
			return errors.New("No Matches subscription Found")
		}
		n = l.nodes[t]
		if n != nil {
			l = n.next
		} else {
			l = nil
		}
	}
	// NOTE(review): emptied trie nodes are never pruned, so long-lived
	// brokers accumulate empty levels for unsubscribed filters.
	if !s.removeFromNode(n, sub) {
		return errors.New("No Matches subscription Found")
	}
	topic := string(sub.topic)
	s.removeFromCache(topic, sub)
	return nil
}
// removeFromNode removes sub from the node's queue or plain list,
// reporting whether it was present.
// FIX: dropped the unreachable trailing `return false` that followed an
// exhaustive if/else (flagged by staticcheck as dead code).
func (s *Sublist) removeFromNode(n *node, sub *subscription) (found bool) {
	if n == nil {
		return false
	}
	if sub.queue {
		n.qsubs, found = removeSubFromList(sub, n.qsubs)
	} else {
		n.psubs, found = removeSubFromList(sub, n.psubs)
	}
	return found
}
// Match returns all subscriptions whose filters match the given publish
// topic, consulting and populating the result cache.
func (s *Sublist) Match(topic string) *SublistResult {
	s.RLock()
	rc, ok := s.cache[topic]
	s.RUnlock()
	if ok {
		return rc
	}
	tokens, err := PublishTopicCheckAndSpilt([]byte(topic))
	if err != nil {
		log.Error("\tserver/sublist.go: ", err)
		return nil
	}
	result := &SublistResult{}
	s.Lock()
	l := s.root
	if len(tokens) > 0 {
		if tokens[0] == "/" {
			if _, exist := l.nodes["#"]; exist {
				addNodeToResults(l.nodes["#"], result)
			}
			// BUG FIX: the original checked for the "+" node but then
			// dereferenced l.nodes["/"].next — a copy-paste slip that
			// panics with a nil pointer whenever "/" is absent and skips
			// the "+" subtree entirely.
			if _, exist := l.nodes["+"]; exist {
				matchLevel(l.nodes["+"].next, tokens[1:], result)
			}
			if _, exist := l.nodes["/"]; exist {
				matchLevel(l.nodes["/"].next, tokens[1:], result)
			}
		} else {
			matchLevel(s.root, tokens, result)
		}
	}
	s.cache[topic] = result
	// Crude cache bound: evict one arbitrary entry once we exceed 1024.
	if len(s.cache) > 1024 {
		for k := range s.cache {
			delete(s.cache, k)
			break
		}
	}
	s.Unlock()
	// log.Info("SublistResult: ", result)
	return result
}
// matchLevel walks one trie level per topic token, collecting matching
// plain and queue subscriptions into results. A "#" node matches the
// remainder of the topic; a "+" node matches exactly one token.
func matchLevel(l *level, toks []string, results *SublistResult) {
	var swc, n *node
	exist := false
	for i, t := range toks {
		if l == nil {
			return
		}
		// "#" at this level matches everything from here down.
		if _, exist = l.nodes["#"]; exist {
			addNodeToResults(l.nodes["#"], results)
		}
		if t != "/" {
			// "+" matches this single token; recurse for the rest.
			if swc, exist = l.nodes["+"]; exist {
				matchLevel(l.nodes["+"].next, toks[i+1:], results)
			}
		} else {
			if _, exist = l.nodes["+"]; exist {
				addNodeToResults(l.nodes["+"], results)
			}
		}
		n = l.nodes[t]
		if n != nil {
			l = n.next
		} else {
			l = nil
		}
	}
	if n != nil {
		addNodeToResults(n, results)
	}
	if swc != nil {
		// BUG FIX: the original added n here a second time (or nil); the
		// single-level wildcard node that terminated the walk is the one
		// whose subscriptions belong in the result.
		addNodeToResults(swc, results)
	}
}
// addNodeToResults appends a node's plain and queue subscriptions to the
// accumulated match results.
func addNodeToResults(n *node, results *SublistResult) {
	results.psubs = append(results.psubs, n.psubs...)
	results.qsubs = append(results.qsubs, n.qsubs...)
}
// removeSubFromList deletes sub (matched by pointer identity) from sl
// using a swap-with-last delete — list order is not significant here.
// It returns the possibly shrunk slice and whether sub was found.
func removeSubFromList(sub *subscription, sl []*subscription) ([]*subscription, bool) {
	for i, cur := range sl {
		if cur != sub {
			continue
		}
		last := len(sl) - 1
		sl[i] = sl[last]
		sl[last] = nil // drop the reference so the GC can reclaim it
		return shrinkAsNeeded(sl[:last]), true
	}
	return sl, false
}
// shrinkAsNeeded reallocates a subscription list when more than half of
// its capacity is unused, releasing memory after large growth followed
// by many unsubscribes. Small lists (cap <= 8) are left alone.
func shrinkAsNeeded(sl []*subscription) []*subscription {
	length, capacity := len(sl), cap(sl)
	if capacity <= 8 {
		// Not worth reallocating such a small backing array.
		return sl
	}
	unused := float32(capacity-length) / float32(capacity)
	if unused > 0.50 {
		return append([]*subscription(nil), sl...)
	}
	return sl
}

View File

@@ -6,6 +6,13 @@ type Worker struct {
quit chan bool
}
// NewWorker creates a Worker bound to the shared pool, with fresh
// message and quit channels.
func NewWorker(workerPool chan chan *Message) Worker {
	w := Worker{quit: make(chan bool)}
	w.WorkerPool = workerPool
	w.MsgChannel = make(chan *Message)
	return w
}
func (w Worker) Start() {
go func() {
for {
@@ -14,8 +21,8 @@ func (w Worker) Start() {
select {
case msg := <-w.MsgChannel:
// we have received a work request.
ProcessMessage(msg)
case <-w.quit:
// we have received a signal to stop
return
}
}

View File

@@ -1,64 +0,0 @@
package main
import (
"fmt"
"os"
)
var (
	MaxWorker = os.Getenv("MAX_WORKERS") // worker-count override (raw, unparsed string)
	MaxQueue  = os.Getenv("MAX_QUEUE")   // queue-length override (raw, unparsed string)
)

// Job represents the job to be run.
type Job struct {
	Payload Payload
}

// JobQueue is a buffered channel that we can send work requests on.
var JobQueue chan Job

// Worker represents the worker that executes the job.
type Worker struct {
	WorkerPool chan chan Job // shared pool this worker registers with
	JobChannel chan Job      // per-worker channel jobs are delivered on
	quit       chan bool     // signaled by Stop to end the run loop
}

// NewWorker creates a Worker bound to the given pool, with fresh job
// and quit channels.
func NewWorker(workerPool chan chan Job) Worker {
	return Worker{
		WorkerPool: workerPool,
		JobChannel: make(chan Job),
		quit:       make(chan bool)}
}
// Start method starts the run loop for the worker, listening for a quit
// channel in case we need to stop it. Each iteration re-registers the
// worker's job channel in the shared pool, then blocks for a job or a
// quit signal.
func (w Worker) Start() {
	go func() {
		for {
			// register the current worker into the worker queue.
			w.WorkerPool <- w.JobChannel
			select {
			case job := <-w.JobChannel:
				// we have received a work request.
				// FIX: `job` was declared but never used, which is a
				// compile error in Go; keep it explicitly discarded until
				// payload processing (below) is implemented.
				_ = job
				fmt.Println("process msg ")
				// if err := job.Payload.UploadToS3(); err != nil {
				// 	log.Errorf("Error uploading to S3: %s", err.Error())
				// }
			case <-w.quit:
				// we have received a signal to stop
				return
			}
		}
	}()
}
// Stop signals the worker to stop listening for work requests. The send
// happens in a goroutine so Stop never blocks the caller.
func (w Worker) Stop() {
	go func() { w.quit <- true }()
}