6 Commits

Author SHA1 Message Date
Marc Magnin
d52d8dda07 #17 Enable use of MQTT broker as a library -> passing a logger reference to the broker instance 2018-02-08 11:13:50 +01:00
Marc Magnin
148dbbb23c #13 introduced sync.Pool 2018-02-05 15:14:46 +01:00
joy.zhou
0ff20b6ee2 Update README.md 2018-02-03 13:11:53 +08:00
joy.zhou
7155667f6c Pool (#16)
* add pool

* elastic workerpool

* del buf

* modify usage

* modify readme
2018-02-03 12:42:25 +08:00
zhouyuyan
83db82cdcc Merge branch 'master' of https://github.com/fhmq/hmq 2018-01-31 11:00:29 +08:00
zhouyuyan
b3653bcfb1 fix #14 2018-01-31 10:59:59 +08:00
13 changed files with 378 additions and 283 deletions

View File

@@ -16,43 +16,26 @@ $ go run main.go
## Usage of hmq:
~~~
Usage of ./hmq:
-w int
worker num to process message, prefer (client num)/10. (default 1024)
-worker int
worker num to process message, prefer (client num)/10. (default 1024)
-h string
Network host to listen on. (default "0.0.0.0")
-host string
Network host to listen on. (default "0.0.0.0")
-p string
Port to listen on. (default "1883")
-port string
Port to listen on. (default "1883")
-c string
config file for hmq
-config string
config file for hmq
-cluster string
Cluster ip from which members can connect.
-cluster_listen string
Cluster ip from which members can connect.
-cluster_port string
Cluster port from which members can connect.
-cp string
Cluster port from which members can connect.
-r string
Router who maintains cluster info
-router string
Router who maintains cluster info
-ws_path string
path for ws to listen on
-ws_port string
port for ws to listen on
-wspath string
path for ws to listen on
-wsport string
port for ws to listen on
Usage: hmq [options]
Broker Options:
-w, --worker <number> Worker num to process message, prefer (client num)/10. (default 1024)
-p, --port <port> Use port for clients (default: 1883)
--host <host> Network host to listen on. (default "0.0.0.0")
-ws, --wsport <port> Use port for websocket monitoring
-wsp,--wspath <path> Use path for websocket monitoring
-c, --config <file> Configuration file
Logging Options:
-d, --debug <bool> Enable debugging output (default false)
-D Debug enabled
Cluster Options:
-r, --router <rurl> Router who maintains cluster info
-cp, --clusterport <cluster-port> Cluster listen port for others
Common Options:
-h, --help Show this message
~~~
### hmq.config
@@ -105,6 +88,9 @@ Usage of ./hmq:
### Cluster
```bash
1, start router for hmq (https://github.com/fhmq/router.git)
$ go get github.com/fhmq/router
$ cd $GOPATH/github.com/fhmq/router
$ go run main.go
2, config router in hmq.config ("router": "127.0.0.1:9888")
```

View File

@@ -6,6 +6,7 @@ import (
"strings"
"github.com/fhmq/hmq/lib/acl"
"go.uber.org/zap"
"github.com/fsnotify/fsnotify"

View File

@@ -18,38 +18,45 @@ import (
"go.uber.org/zap"
"golang.org/x/net/websocket"
"github.com/fhmq/hmq/logger"
)
var (
log = logger.Get().Named("Broker")
log *zap.Logger
messagePoolQueueSize = 4096
)
type Broker struct {
id string
cid uint64
mu sync.Mutex
config *Config
tlsConfig *tls.Config
AclConfig *acl.ACLConfig
clients sync.Map
routes sync.Map
remotes sync.Map
nodes map[string]interface{}
sl *Sublist
rl *RetainList
queues map[string]int
type Message struct {
client *client
packet packets.ControlPacket
}
func NewBroker(config *Config) (*Broker, error) {
type Broker struct {
id string
cid uint64
mu sync.Mutex
config *Config
tlsConfig *tls.Config
AclConfig *acl.ACLConfig
dispatcher *Dispatcher
clients sync.Map
routes sync.Map
remotes sync.Map
nodes map[string]interface{}
sl *Sublist
rl *RetainList
queues map[string]int
}
func NewBroker(config *Config, logger *zap.Logger) (*Broker, error) {
log = logger
b := &Broker{
id: GenUniqueId(),
config: config,
sl: NewSublist(),
rl: NewRetainList(),
nodes: make(map[string]interface{}),
queues: make(map[string]int),
id: GenUniqueId(),
config: config,
dispatcher: NewDispatcher(),
sl: NewSublist(),
rl: NewRetainList(),
nodes: make(map[string]interface{}),
queues: make(map[string]int),
}
if b.config.TlsPort != "" {
tlsconfig, err := NewTLSConfig(b.config.TlsInfo)
@@ -71,12 +78,16 @@ func NewBroker(config *Config) (*Broker, error) {
return b, nil
}
func (b *Broker) DispatchMessage(msg *Message) {
b.dispatcher.Dispatch(msg)
}
func (b *Broker) Start() {
if b == nil {
log.Error("broker is null")
return
}
StartDispatcher()
// listen for clients over TCP
if b.config.Port != "" {
@@ -124,7 +135,7 @@ func StateMonitor() {
func (b *Broker) StartWebsocketListening() {
path := b.config.WsPath
hp := ":" + b.config.WsPort
log.Info("Start Websocket Listening on ", zap.String("hp", hp), zap.String("path", path))
log.Info("Start Websocket Listener on:", zap.String("hp", hp), zap.String("path", path))
http.Handle(path, websocket.Handler(b.wsHandler))
var err error
if b.config.WsTLS {
@@ -133,7 +144,7 @@ func (b *Broker) StartWebsocketListening() {
err = http.ListenAndServe(hp, nil)
}
if err != nil {
log.Error("ListenAndServe: " + err.Error())
log.Error("ListenAndServe:" + err.Error())
return
}
}
@@ -303,34 +314,27 @@ func (b *Broker) handleConnection(typ int, conn net.Conn, idx uint64) {
cid := c.info.clientID
var msgPool *MessagePool
var exist bool
var old interface{}
switch typ {
case CLIENT:
msgPool = MSGPool[idx%MessagePoolNum].GetPool()
c.mp = msgPool
old, exist = b.clients.Load(cid)
if exist {
log.Warn("client exist, close old...", zap.String("clientID", c.info.clientID))
ol, ok := old.(*client)
if ok {
msg := &Message{client: c, packet: DisconnectdPacket}
ol.mp.queue <- msg
ol.Close()
}
}
b.clients.Store(cid, c)
case ROUTER:
msgPool = MSGPool[(MessagePoolNum + idx)].GetPool()
c.mp = msgPool
old, exist = b.routes.Load(cid)
if exist {
log.Warn("router exist, close old...")
ol, ok := old.(*client)
if ok {
msg := &Message{client: c, packet: DisconnectdPacket}
ol.mp.queue <- msg
ol.Close()
}
}
b.routes.Store(cid, c)
@@ -383,7 +387,6 @@ func (b *Broker) ConnectToDiscovery() {
c.SendConnect()
c.SendInfo()
c.mp = &MSGPool[(MessagePoolNum + 2)]
go c.readLoop()
go c.StartPing()
}
@@ -446,10 +449,7 @@ func (b *Broker) connectRouter(id, addr string) {
c.init()
b.remotes.Store(cid, c)
c.mp = MSGPool[(MessagePoolNum + 1)].GetPool()
c.SendConnect()
// c.SendInfo()
go c.readLoop()
go c.StartPing()
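
Together with the exported DispatchMessage, the new NewBroker(config, logger) signature is what lets an application embed hmq as a library and inject its own logger (commit d52d8dda07). Below is a minimal sketch of such an embedding, using only the signatures visible in this diff; the flag values and the Named("Broker") child are illustrative choices, not part of the change:

```go
package main

import (
	"github.com/fhmq/hmq/broker"
	"go.uber.org/zap"
)

func main() {
	// The embedding application owns the logger and hands it to the broker.
	appLog, _ := zap.NewProduction()
	defer appLog.Sync()

	// ConfigureConfig now accepts an explicit argument slice, so options can
	// be supplied programmatically instead of from os.Args.
	config, err := broker.ConfigureConfig([]string{"-p", "1883", "-w", "256"})
	if err != nil {
		appLog.Fatal("configure broker config", zap.Error(err))
	}

	b, err := broker.NewBroker(config, appLog.Named("Broker"))
	if err != nil {
		appLog.Fatal("new broker", zap.Error(err))
	}
	b.Start()

	select {} // keep the process alive; a real application would wait for a signal
}
```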

View File

@@ -4,6 +4,7 @@ package broker
import (
"net"
"reflect"
"strings"
"sync"
"time"
@@ -38,7 +39,6 @@ type client struct {
status int
closed chan int
smu sync.RWMutex
mp *MessagePool
subs map[string]*subscription
rsubs map[string]*subInfo
}
@@ -90,7 +90,6 @@ func (c *client) keepAlive(ch chan int) {
defer close(ch)
keepalive := time.Duration(c.info.keepalive*3/2) * time.Second
timer := time.NewTimer(keepalive)
msgPool := c.mp
for {
select {
@@ -102,8 +101,7 @@ func (c *client) keepAlive(ch chan int) {
continue
}
log.Error("Client exceeded timeout, disconnecting. ", zap.String("ClientID", c.info.clientID), zap.Uint16("keepalive", c.info.keepalive))
msg := &Message{client: c, packet: DisconnectdPacket}
msgPool.queue <- msg
c.broker.DispatchMessage(&Message{client: c, packet: DisconnectdPacket})
timer.Stop()
return
case _, ok := <-c.closed:
@@ -116,8 +114,7 @@ func (c *client) keepAlive(ch chan int) {
func (c *client) readLoop() {
nc := c.conn
msgPool := c.mp
if nc == nil || msgPool == nil {
if nc == nil {
return
}
@@ -130,18 +127,16 @@ func (c *client) readLoop() {
log.Error("read packet error: ", zap.Error(err), zap.String("ClientID", c.info.clientID))
break
}
// keepalive channel
ch <- 1
msg := &Message{
c.broker.DispatchMessage(&Message{
client: c,
packet: packet,
}
msgPool.queue <- msg
})
}
msg := &Message{client: c, packet: DisconnectdPacket}
msgPool.queue <- msg
msgPool.Reduce()
c.broker.DispatchMessage(&Message{client: c, packet: DisconnectdPacket})
}
func ProcessMessage(msg *Message) {
@@ -150,10 +145,10 @@ func ProcessMessage(msg *Message) {
if ca == nil {
return
}
log.Debug("Recv message from client,", zap.String("ClientID", c.info.clientID))
log.Debug("Recv message:", zap.String("message type", reflect.TypeOf(msg.packet).String()[9:]), zap.String("ClientID", c.info.clientID))
switch ca.(type) {
case *packets.ConnackPacket:
case *packets.ConnectPacket:
case *packets.PublishPacket:
packet := ca.(*packets.PublishPacket)

View File

@@ -10,7 +10,9 @@ import (
"flag"
"fmt"
"io/ioutil"
"os"
"github.com/fhmq/hmq/logger"
"go.uber.org/zap"
)
@@ -28,6 +30,7 @@ type Config struct {
TlsInfo TLSInfo `json:"tlsInfo"`
Acl bool `json:"acl"`
AclConf string `json:"aclConf"`
Debug bool `json:"-"`
}
type RouteInfo struct {
@@ -49,30 +52,60 @@ var DefaultConfig *Config = &Config{
Acl: false,
}
func ConfigureConfig() (*Config, error) {
func showHelp() {
fmt.Printf("%s\n", usageStr)
os.Exit(0)
}
func ConfigureConfig(args []string) (*Config, error) {
config := &Config{}
var (
help bool
configFile string
)
flag.IntVar(&config.Worker, "w", 1024, "worker num to process message, prefer (client num)/10.")
flag.IntVar(&config.Worker, "worker", 1024, "worker num to process message, prefer (client num)/10.")
flag.StringVar(&config.Port, "port", "1883", "Port to listen on.")
flag.StringVar(&config.Port, "p", "1883", "Port to listen on.")
flag.StringVar(&config.Host, "host", "0.0.0.0", "Network host to listen on.")
flag.StringVar(&config.Host, "h", "0.0.0.0", "Network host to listen on.")
flag.StringVar(&config.Cluster.Host, "cluster", "", "Cluster ip from which members can connect.")
flag.StringVar(&config.Cluster.Host, "cluster_listen", "", "Cluster ip from which members can connect.")
flag.StringVar(&config.Cluster.Port, "cp", "", "Cluster port from which members can connect.")
flag.StringVar(&config.Cluster.Port, "cluster_port", "", "Cluster port from which members can connect.")
flag.StringVar(&config.Router, "r", "", "Router who maintains cluster info")
flag.StringVar(&config.Router, "router", "", "Router who maintains cluster info")
flag.StringVar(&config.WsPort, "wsport", "", "port for ws to listen on")
flag.StringVar(&config.WsPort, "ws_port", "", "port for ws to listen on")
flag.StringVar(&config.WsPath, "wspath", "", "path for ws to listen on")
flag.StringVar(&config.WsPath, "ws_path", "", "path for ws to listen on")
flag.StringVar(&configFile, "config", "", "config file for hmq")
flag.StringVar(&configFile, "c", "", "config file for hmq")
flag.Parse()
fs := flag.NewFlagSet("hmq-broker", flag.ExitOnError)
fs.Usage = showHelp
fs.BoolVar(&help, "h", false, "Show this message.")
fs.BoolVar(&help, "help", false, "Show this message.")
fs.IntVar(&config.Worker, "w", 1024, "worker num to process message, prefer (client num)/10.")
fs.IntVar(&config.Worker, "worker", 1024, "worker num to process message, prefer (client num)/10.")
fs.StringVar(&config.Port, "port", "1883", "Port to listen on.")
fs.StringVar(&config.Port, "p", "1883", "Port to listen on.")
fs.StringVar(&config.Host, "host", "0.0.0.0", "Network host to listen on")
fs.StringVar(&config.Cluster.Port, "cp", "", "Cluster port from which members can connect.")
fs.StringVar(&config.Cluster.Port, "clusterport", "", "Cluster port from which members can connect.")
fs.StringVar(&config.Router, "r", "", "Router who maintains cluster info")
fs.StringVar(&config.Router, "router", "", "Router who maintains cluster info")
fs.StringVar(&config.WsPort, "ws", "", "port for ws to listen on")
fs.StringVar(&config.WsPort, "wsport", "", "port for ws to listen on")
fs.StringVar(&config.WsPath, "wsp", "", "path for ws to listen on")
fs.StringVar(&config.WsPath, "wspath", "", "path for ws to listen on")
fs.StringVar(&configFile, "config", "", "config file for hmq")
fs.StringVar(&configFile, "c", "", "config file for hmq")
fs.BoolVar(&config.Debug, "debug", false, "enable Debug logging.")
fs.BoolVar(&config.Debug, "d", false, "enable Debug logging.")
fs.Bool("D", true, "enable Debug logging.")
if err := fs.Parse(args); err != nil {
return nil, err
}
if help {
showHelp()
return nil, nil
}
fs.Visit(func(f *flag.Flag) {
switch f.Name {
case "D":
config.Debug = true
}
})
logger.InitLogger(config.Debug)
log = logger.Get().Named("Broker")
if configFile != "" {
tmpConfig, e := LoadConfig(configFile)
@@ -116,8 +149,6 @@ func (config *Config) check() error {
config.Worker = 1024
}
WorkNum = config.Worker
if config.Port != "" {
if config.Host == "" {
config.Host = "0.0.0.0"

View File

@@ -1,46 +1,25 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker
var WorkNum int
import (
"sync"
)
// Dispatcher will delegate ProcessMessage func to multiple goroutines
type Dispatcher struct {
WorkerPool chan chan *Message
}
func StartDispatcher() {
InitMessagePool()
dispatcher := NewDispatcher()
dispatcher.Run()
}
func (d *Dispatcher) Run() {
// starting n number of workers
for i := 0; i < WorkNum; i++ {
worker := NewWorker(d.WorkerPool)
worker.Start()
}
go d.dispatch()
workerPool *sync.Pool
}
// NewDispatcher creates a *Dispatcher instance
func NewDispatcher() *Dispatcher {
pool := make(chan chan *Message, WorkNum)
return &Dispatcher{WorkerPool: pool}
}
func (d *Dispatcher) dispatch() {
for i := 0; i < (MessagePoolNum + 3); i++ {
go func(idx int) {
for {
select {
case msg := <-MSGPool[idx].queue:
go func(msg *Message) {
msgChannel := <-d.WorkerPool
msgChannel <- msg
}(msg)
}
}
}(i)
return &Dispatcher{workerPool: &sync.Pool{
New: func() interface{} {
return NewWorker()
},
},
}
}
// Dispatch a message to the workers
func (d *Dispatcher) Dispatch(message *Message) {
d.workerPool.Get().(Worker).WorkerChannel <- Work{WorkerPool: d.workerPool, Message: message}
}

View File

@@ -1,64 +0,0 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker
import (
"sync"
"github.com/eclipse/paho.mqtt.golang/packets"
)
const (
MaxUser = 1024 * 1024
MessagePoolNum = 1024
MessagePoolUser = MaxUser / MessagePoolNum
MessagePoolMessageNum = MaxUser / MessagePoolNum * 4
)
type Message struct {
client *client
packet packets.ControlPacket
}
var (
MSGPool []MessagePool
)
type MessagePool struct {
l sync.Mutex
maxuser int
user int
queue chan *Message
}
func InitMessagePool() {
MSGPool = make([]MessagePool, (MessagePoolNum + 3))
for i := 0; i < (MessagePoolNum + 3); i++ {
MSGPool[i].Init(MessagePoolUser, MessagePoolMessageNum)
}
}
func (p *MessagePool) Init(num int, maxusernum int) {
p.maxuser = maxusernum
p.queue = make(chan *Message, num)
}
func (p *MessagePool) GetPool() *MessagePool {
p.l.Lock()
if p.user+1 < p.maxuser {
p.user += 1
p.l.Unlock()
return p
} else {
p.l.Unlock()
return nil
}
}
func (p *MessagePool) Reduce() {
p.l.Lock()
p.user -= 1
p.l.Unlock()
}

broker/usage.go (new file, 24 lines)
View File

@@ -0,0 +1,24 @@
package broker
var usageStr = `
Usage: hmq [options]
Broker Options:
-w, --worker <number> Worker num to process message, prefer (client num)/10. (default 1024)
-p, --port <port> Use port for clients (default: 1883)
--host <host> Network host to listen on. (default "0.0.0.0")
-ws, --wsport <port> Use port for websocket monitoring
-wsp,--wspath <path> Use path for websocket monitoring
-c, --config <file> Configuration file
Logging Options:
-d, --debug <bool> Enable debugging output (default false)
-D Debug and trace
Cluster Options:
-r, --router <rurl> Router who maintains cluster info
-cp, --clusterport <cluster-port> Cluster listen port for others
Common Options:
-h, --help Show this message
`

View File

@@ -1,39 +1,28 @@
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
*/
package broker
import "sync"
type Work struct {
WorkerPool *sync.Pool
Message *Message
}
type Worker struct {
WorkerPool chan chan *Message
MsgChannel chan *Message
quit chan bool
WorkerChannel chan Work
}
func NewWorker(workerPool chan chan *Message) Worker {
return Worker{
WorkerPool: workerPool,
MsgChannel: make(chan *Message),
quit: make(chan bool)}
func NewWorker() Worker {
w := Worker{WorkerChannel: make(chan Work)}
return w.Start()
}
func (w Worker) Start() {
func (w Worker) Start() Worker {
go func() {
for {
// register the current worker into the worker queue.
w.WorkerPool <- w.MsgChannel
select {
case msg := <-w.MsgChannel:
// we have received a work request.
ProcessMessage(msg)
case <-w.quit:
return
}
for work := range w.WorkerChannel {
ProcessMessage(work.Message)
// put the worker back
work.WorkerPool.Put(w)
}
}()
}
// Stop signals the worker to stop listening for work requests.
func (w Worker) Stop() {
go func() {
w.quit <- true
}()
return w
}
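
The rewritten Dispatcher and Worker replace the fixed channel-of-channels worker queue with a sync.Pool of self-starting workers: Dispatch pulls a worker from the pool (creating one on demand via New), hands it a Work item that carries the pool, and the worker puts itself back once ProcessMessage returns. Here is a self-contained sketch of the same pattern with a generic job type; the names are illustrative, not hmq's:

```go
package main

import (
	"fmt"
	"sync"
)

type job struct {
	pool *sync.Pool      // the worker returns itself here when done
	wg   *sync.WaitGroup // lets the caller wait for completion
	n    int
}

type poolWorker struct{ ch chan job }

func newPoolWorker() poolWorker {
	w := poolWorker{ch: make(chan job)}
	go func() {
		for j := range w.ch {
			fmt.Println("processed", j.n) // stands in for ProcessMessage
			j.pool.Put(w)                 // register availability again
			j.wg.Done()
		}
	}()
	return w
}

func main() {
	pool := &sync.Pool{New: func() interface{} { return newPoolWorker() }}

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		// Dispatch: take a (possibly brand-new) worker and hand it the work.
		pool.Get().(poolWorker).ch <- job{pool: pool, wg: &wg, n: i}
	}
	wg.Wait()
}
```

Compared with the previous fixed set of WorkNum goroutines, workers are created only when Dispatch finds none available, and idle ones become eligible for garbage collection, since sync.Pool may drop pooled items at any GC cycle.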

View File

@@ -8,44 +8,40 @@ import (
)
var (
// env can be set up at build time with the Go linker. Value could be prod or whatever else for dev env
env string
instance *zap.Logger
logCfg zap.Config
logInstance *zap.Logger
)
// NewDevLogger returns a logger for dev builds
func NewDevLogger() (*zap.Logger, error) {
// InitDevLogger instantiates a logger for dev builds
func InitDevLogger() {
logCfg := zap.NewDevelopmentConfig()
return logCfg.Build()
logInstance, _ = logCfg.Build()
}
// NewProdLogger returns a logger for production builds
func NewProdLogger() (*zap.Logger, error) {
// InitProdLogger instantiates a logger for production builds
func InitProdLogger() {
logCfg := zap.NewProductionConfig()
logCfg.DisableStacktrace = true
logCfg.Level = zap.NewAtomicLevelAt(zap.InfoLevel)
return logCfg.Build()
logInstance, _ = logCfg.Build()
}
func init() {
func InitLogger(debug bool) {
var err error
var log *zap.Logger
if env == "prod" {
log, err = NewProdLogger()
if debug {
InitDevLogger()
} else {
log, err = NewDevLogger()
InitProdLogger()
}
if err != nil {
panic("Unable to create a logger.")
}
defer log.Sync()
log.Debug("Logger initialization succeeded")
instance = log.Named("hmq")
logInstance.Debug("Logger initialization succeeded")
}
// Get return a *zap.Logger instance
// Get the existing *zap.Logger instance. If none has been created, it'll instantiate the dev logger
func Get() *zap.Logger {
return instance
if logInstance == nil {
InitDevLogger()
}
return logInstance
}
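
With the init()-time singleton replaced by an explicit InitLogger(debug) and a lazy Get(), callers decide when and how the shared logger is configured. A minimal usage sketch based only on the functions above; the component name is an illustrative assumption:

```go
package main

import (
	"github.com/fhmq/hmq/logger"
	"go.uber.org/zap"
)

func main() {
	// Pick the dev (debug) or prod zap config explicitly at startup.
	logger.InitLogger(true)

	// Components take a named child of the shared instance, as broker.go does.
	log := logger.Get().Named("Example")
	log.Debug("only visible with the dev config", zap.String("component", "Example"))
	log.Info("always visible")
}
```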

View File

@@ -19,15 +19,11 @@ func TestGet(t *testing.T) {
}
func TestNewDevLogger(t *testing.T) {
logger, err := NewDevLogger()
assert.Nil(t, err)
assert.True(t, logger.Core().Enabled(zap.DebugLevel))
InitDevLogger()
assert.True(t, Get().Core().Enabled(zap.DebugLevel))
}
func TestNewProdLogger(t *testing.T) {
logger, err := NewProdLogger()
assert.Nil(t, err)
assert.False(t, logger.Core().Enabled(zap.DebugLevel))
InitProdLogger()
assert.False(t, Get().Core().Enabled(zap.DebugLevel))
}

main.go (18 lines changed)
View File

@@ -7,36 +7,32 @@ copyright notice and this permission notice appear in all copies.
package main
import (
"fmt"
"os"
"os/signal"
"runtime"
"github.com/fhmq/hmq/broker"
"github.com/fhmq/hmq/logger"
"go.uber.org/zap"
)
var (
log = logger.Get().Named("Main")
)
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
config, err := broker.ConfigureConfig()
config, err := broker.ConfigureConfig(os.Args[1:])
if err != nil {
log.Error("configure broker config error: ", zap.Error(err))
fmt.Println("configure broker config error: ", err)
return
}
b, err := broker.NewBroker(config)
logger.InitLogger(config.Debug)
b, err := broker.NewBroker(config, logger.Get())
if err != nil {
log.Error("New Broker error: ", zap.Error(err))
fmt.Println("New Broker error: ", err)
return
}
b.Start()
s := waitForSignal()
log.Info("signal received, broker closed.", zap.Any("signal", s))
fmt.Println("signal received, broker closed.", s)
}
func waitForSignal() os.Signal {

pool/pool.go (new file, 166 lines)
View File

@@ -0,0 +1,166 @@
package pool
import "time"
const (
// This value is the size of the queue on which workers register their
// availability with the dispatcher. There may be hundreds of workers, but
// only a small channel is needed to register some of the workers.
readyQueueSize = 16
// If worker pool receives no new work for this period of time, then stop
// a worker goroutine.
idleTimeoutSec = 5
)
type WorkerPool struct {
maxWorkers int
timeout time.Duration
taskQueue chan func()
readyWorkers chan chan func()
stoppedChan chan struct{}
}
func New(maxWorkers int) *WorkerPool {
// There must be at least one worker.
if maxWorkers < 1 {
maxWorkers = 1
}
// taskQueue is unbuffered since items are always removed immediately.
pool := &WorkerPool{
taskQueue: make(chan func()),
maxWorkers: maxWorkers,
readyWorkers: make(chan chan func(), readyQueueSize),
timeout: time.Second * idleTimeoutSec,
stoppedChan: make(chan struct{}),
}
// Start the task dispatcher.
go pool.dispatch()
return pool
}
func (p *WorkerPool) Stop() {
if p.Stopped() {
return
}
close(p.taskQueue)
<-p.stoppedChan
}
func (p *WorkerPool) Stopped() bool {
select {
case <-p.stoppedChan:
return true
default:
}
return false
}
func (p *WorkerPool) Submit(task func()) {
if task != nil {
p.taskQueue <- task
}
}
func (p *WorkerPool) SubmitWait(task func()) {
if task == nil {
return
}
doneChan := make(chan struct{})
p.taskQueue <- func() {
task()
close(doneChan)
}
<-doneChan
}
func (p *WorkerPool) dispatch() {
defer close(p.stoppedChan)
timeout := time.NewTimer(p.timeout)
var workerCount int
var task func()
var ok bool
var workerTaskChan chan func()
startReady := make(chan chan func())
Loop:
for {
timeout.Reset(p.timeout)
select {
case task, ok = <-p.taskQueue:
if !ok {
break Loop
}
// Got a task to do.
select {
case workerTaskChan = <-p.readyWorkers:
// A worker is ready, so give task to worker.
workerTaskChan <- task
default:
// No workers ready.
// Create a new worker, if not at max.
if workerCount < p.maxWorkers {
workerCount++
go func(t func()) {
startWorker(startReady, p.readyWorkers)
// Submit the task once the new worker is ready.
taskChan := <-startReady
taskChan <- t
}(task)
} else {
// Start a goroutine to submit the task when an existing
// worker is ready.
go func(t func()) {
taskChan := <-p.readyWorkers
taskChan <- t
}(task)
}
}
case <-timeout.C:
// Timed out waiting for work to arrive. Kill a ready worker.
if workerCount > 0 {
select {
case workerTaskChan = <-p.readyWorkers:
// A worker is ready, so kill.
close(workerTaskChan)
workerCount--
default:
// No work, but no ready workers. All workers are busy.
}
}
}
}
// Stop all remaining workers as they become ready.
for workerCount > 0 {
workerTaskChan = <-p.readyWorkers
close(workerTaskChan)
workerCount--
}
}
func startWorker(startReady, readyWorkers chan chan func()) {
go func() {
taskChan := make(chan func())
var task func()
var ok bool
// Register availability on startReady channel.
startReady <- taskChan
for {
// Read task from dispatcher.
task, ok = <-taskChan
if !ok {
// Dispatcher has told worker to stop.
break
}
// Execute the task.
task()
// Register availability on readyWorkers channel.
readyWorkers <- taskChan
}
}()
}
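
The new pool package is a standalone elastic worker pool: New(maxWorkers) caps concurrency, Submit hands a task to the dispatcher, SubmitWait additionally blocks until the task has run, and workers idle for idleTimeoutSec seconds are shut down. A small hedged usage sketch (the task bodies are placeholders):

```go
package main

import (
	"fmt"
	"sync"

	"github.com/fhmq/hmq/pool"
)

func main() {
	// At most 4 tasks run concurrently; idle workers exit after the timeout.
	p := pool.New(4)
	defer p.Stop()

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		n := i
		wg.Add(1)
		p.Submit(func() {
			defer wg.Done()
			fmt.Println("task", n)
		})
	}
	wg.Wait()

	// SubmitWait only returns once the submitted task has executed.
	p.SubmitWait(func() { fmt.Println("all earlier tasks done") })
}
```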