mirror of
https://github.com/fhmq/hmq.git
synced 2026-04-28 04:28:34 +00:00
Compare commits
144 Commits
1.2
...
v5_support
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ec0886ec49 | ||
|
|
48b146d64e | ||
|
|
5ba8038ac2 | ||
|
|
1c2d20eaf5 | ||
|
|
de2dd52ca4 | ||
|
|
ea619d4f73 | ||
|
|
35944d774d | ||
|
|
cdff42698a | ||
|
|
9fc57423db | ||
|
|
e3fa6573f6 | ||
|
|
4f98faeefc | ||
|
|
805a7b895a | ||
|
|
a94159e79c | ||
|
|
51adb125dd | ||
|
|
1ef00a7a50 | ||
|
|
af6f4d280a | ||
|
|
73acb5a211 | ||
|
|
239655d0a1 | ||
|
|
c083b83f3d | ||
|
|
e9f340c38f | ||
|
|
0daf8bfed9 | ||
|
|
8430749ec4 | ||
|
|
15f3f6d52e | ||
|
|
1c4ead691e | ||
|
|
3aea177ea8 | ||
|
|
b2e79c3bea | ||
|
|
5dc2114daf | ||
|
|
92758c8c85 | ||
|
|
0e3226ece1 | ||
|
|
061b485a3a | ||
|
|
7787d3ca0d | ||
|
|
a95c028cb8 | ||
|
|
c53d8f8a0d | ||
|
|
fa7bf33c60 | ||
|
|
a85e9904c2 | ||
|
|
a501565bab | ||
|
|
bd5bd04e45 | ||
|
|
f8a44be413 | ||
|
|
31864cdf2b | ||
|
|
94ff8e8405 | ||
|
|
bf2b91c535 | ||
|
|
de0cfc6683 | ||
|
|
332c8a59f7 | ||
|
|
108e934a85 | ||
|
|
46b64e5b84 | ||
|
|
ab117be4a8 | ||
|
|
878e7fce3f | ||
|
|
8d486c3a20 | ||
|
|
764d0402f0 | ||
|
|
538bf70f5b | ||
|
|
1d6979189a | ||
|
|
c75ef2d6aa | ||
|
|
068d5e893c | ||
|
|
f66abe5fcb | ||
|
|
ccbe364f9f | ||
|
|
7cc3949bbe | ||
|
|
afe62e0a7d | ||
|
|
b4baac9c81 | ||
|
|
7bf5d52fd9 | ||
|
|
ad7f4bc3f0 | ||
|
|
524a9af060 | ||
|
|
8f187157f3 | ||
|
|
c2248bed2b | ||
|
|
6be79cbe88 | ||
|
|
6cb307d252 | ||
|
|
b8bacb4c3d | ||
|
|
481a61c520 | ||
|
|
4782f76048 | ||
|
|
1a374f9734 | ||
|
|
3f60d23e85 | ||
|
|
3cf90d5231 | ||
|
|
a1bf3d93b2 | ||
|
|
af7db83bdc | ||
|
|
839041e912 | ||
|
|
17dac26996 | ||
|
|
55f1f1aa80 | ||
|
|
ccb7c37b96 | ||
|
|
7e29cc7213 | ||
|
|
1971b5c324 | ||
|
|
fb453e8c0f | ||
|
|
eef900ad2f | ||
|
|
d24e0dac13 | ||
|
|
fd0622710b | ||
|
|
73dd5bb376 | ||
|
|
474c557c7a | ||
|
|
f3e7e5481a | ||
|
|
57fce9c7dc | ||
|
|
995898c5f4 | ||
|
|
2404693bd2 | ||
|
|
68cd5e94a4 | ||
|
|
44fa819f62 | ||
|
|
2b7bb3fcd5 | ||
|
|
4c107c67ab | ||
|
|
896769fd9d | ||
|
|
c7a51fe68f | ||
|
|
a3fc611615 | ||
|
|
e74b9facd1 | ||
|
|
53a79caad9 | ||
|
|
55576c1eb3 | ||
|
|
80b64b147e | ||
|
|
ea055d5929 | ||
|
|
8d8707801f | ||
|
|
fd2974a546 | ||
|
|
72211efedf | ||
|
|
7e15da209e | ||
|
|
69a26f8cd9 | ||
|
|
148738800b | ||
|
|
e4e736d1e2 | ||
|
|
4c5a48a44b | ||
|
|
c6b1f1db42 | ||
|
|
daf4a0e0f5 | ||
|
|
c350d16ca1 | ||
|
|
edc46c1ee6 | ||
|
|
6193be74fa | ||
|
|
90beada459 | ||
|
|
6c7fe6a0f7 | ||
|
|
2b56664d85 | ||
|
|
7547ad3bdc | ||
|
|
84e7fe2490 | ||
|
|
684584b208 | ||
|
|
56fb4a2d54 | ||
|
|
5ed4728575 | ||
|
|
c0fea6a5ba | ||
|
|
47500910e1 | ||
|
|
0ff20b6ee2 | ||
|
|
7155667f6c | ||
|
|
83db82cdcc | ||
|
|
b3653bcfb1 | ||
|
|
221d00480e | ||
|
|
91733bf91e | ||
|
|
ef252550dc | ||
|
|
1058256235 | ||
|
|
5a569f14a3 | ||
|
|
93b21777ff | ||
|
|
dcf2934e1b | ||
|
|
d9e6e216b0 | ||
|
|
ca3951769a | ||
|
|
0439e7ce90 | ||
|
|
dc0f2185ab | ||
|
|
7462afcfb5 | ||
|
|
114e6f901e | ||
|
|
0cb51bd37a | ||
|
|
819b4725f2 | ||
|
|
85bdeccbfc |
28
.github/workflows/go.yml
vendored
Normal file
28
.github/workflows/go.yml
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
# This workflow will build a golang project
|
||||
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go
|
||||
|
||||
name: Go
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "master" ]
|
||||
pull_request:
|
||||
branches: [ "master" ]
|
||||
|
||||
jobs:
|
||||
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: 1.19
|
||||
|
||||
- name: Build
|
||||
run: go build -v ./...
|
||||
|
||||
- name: Test
|
||||
run: go test -v ./...
|
||||
18
.github/workflows/macos.yml
vendored
Normal file
18
.github/workflows/macos.yml
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
name: MacOS build
|
||||
|
||||
on: [push, pull_request]
|
||||
|
||||
jobs:
|
||||
|
||||
build:
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.18
|
||||
|
||||
- name: Build
|
||||
run: go build -v ./...
|
||||
18
.github/workflows/ubuntu.yml
vendored
Normal file
18
.github/workflows/ubuntu.yml
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
name: Ubuntu build
|
||||
|
||||
on: [push, pull_request]
|
||||
|
||||
jobs:
|
||||
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.18
|
||||
|
||||
- name: Build
|
||||
run: go build -v ./...
|
||||
18
.github/workflows/windows.yml
vendored
Normal file
18
.github/workflows/windows.yml
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
name: Windows build
|
||||
|
||||
on: [push, pull_request]
|
||||
|
||||
jobs:
|
||||
|
||||
build:
|
||||
runs-on: windows-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.18
|
||||
|
||||
- name: Build
|
||||
run: go build -v ./...
|
||||
10
.gitignore
vendored
10
.gitignore
vendored
@@ -1,3 +1,13 @@
|
||||
hmq
|
||||
log
|
||||
log/*
|
||||
*.test
|
||||
# ide
|
||||
.idea
|
||||
.vscode/settings.json
|
||||
.pre-commit-config.yaml
|
||||
hmq.exe
|
||||
*.sw*
|
||||
*.swo
|
||||
*.swp
|
||||
*.swn
|
||||
|
||||
17
Dockerfile
17
Dockerfile
@@ -1,11 +1,12 @@
|
||||
FROM alpine
|
||||
COPY hmq /
|
||||
COPY ssl /ssl
|
||||
COPY conf /conf
|
||||
FROM golang:1.18 as builder
|
||||
WORKDIR /go/src/github.com/fhmq/hmq
|
||||
COPY . .
|
||||
RUN CGO_ENABLED=0 go build -o hmq -a -ldflags '-extldflags "-static"' .
|
||||
|
||||
|
||||
FROM alpine:3.17.3
|
||||
WORKDIR /
|
||||
COPY --from=builder /go/src/github.com/fhmq/hmq/hmq .
|
||||
EXPOSE 1883
|
||||
EXPOSE 1888
|
||||
EXPOSE 8883
|
||||
EXPOSE 1993
|
||||
|
||||
CMD ["/hmq"]
|
||||
ENTRYPOINT ["/hmq"]
|
||||
|
||||
201
LICENSE
Normal file
201
LICENSE
Normal file
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
144
README.md
144
README.md
@@ -1,18 +1,43 @@
|
||||
|
||||
Free and High Performance MQTT Broker
|
||||
============
|
||||
|
||||
## About
|
||||
Golang MQTT Broker, Version 3.1.1, and Compatible
|
||||
for [eclipse paho client](https://github.com/eclipse?utf8=%E2%9C%93&q=mqtt&type=&language=)
|
||||
for [eclipse paho client](https://github.com/eclipse?utf8=%E2%9C%93&q=mqtt&type=&language=) and mosquitto-client
|
||||
|
||||
## RUNNING
|
||||
```bash
|
||||
$ git clone https://github.com/fhmq/hmq.git
|
||||
$ cd hmq
|
||||
$ go get github.com/fhmq/hmq
|
||||
$ cd $GOPATH/github.com/fhmq/hmq
|
||||
$ go run main.go
|
||||
```
|
||||
|
||||
### broker.config
|
||||
## Usage of hmq:
|
||||
~~~
|
||||
Usage: hmq [options]
|
||||
|
||||
Broker Options:
|
||||
-w, --worker <number> Worker num to process message, perfer (client num)/10. (default 1024)
|
||||
-p, --port <port> Use port for clients (default: 1883)
|
||||
--host <host> Network host to listen on. (default "0.0.0.0")
|
||||
-ws, --wsport <port> Use port for websocket monitoring
|
||||
-wsp,--wspath <path> Use path for websocket monitoring
|
||||
-c, --config <file> Configuration file
|
||||
|
||||
Logging Options:
|
||||
-d, --debug <bool> Enable debugging output (default false)
|
||||
-D Debug enabled
|
||||
|
||||
Cluster Options:
|
||||
-r, --router <rurl> Router who maintenance cluster info
|
||||
-cp, --clusterport <cluster-port> Cluster listen port for others
|
||||
|
||||
Common Options:
|
||||
-h, --help Show this message
|
||||
~~~
|
||||
|
||||
### hmq.config
|
||||
~~~
|
||||
{
|
||||
"workerNum": 4096,
|
||||
@@ -20,9 +45,9 @@ $ go run main.go
|
||||
"host": "0.0.0.0",
|
||||
"cluster": {
|
||||
"host": "0.0.0.0",
|
||||
"port": "1993",
|
||||
"routers": ["10.10.0.11:1993","10.10.0.12:1993"]
|
||||
"port": "1993"
|
||||
},
|
||||
"router": "127.0.0.1:9888",
|
||||
"wsPort": "1888",
|
||||
"wsPath": "/ws",
|
||||
"wsTLS": true,
|
||||
@@ -34,8 +59,10 @@ $ go run main.go
|
||||
"certFile": "tls/server/cert.pem",
|
||||
"keyFile": "tls/server/key.pem"
|
||||
},
|
||||
"acl":true,
|
||||
"aclConf":"conf/acl.conf"
|
||||
"plugins": {
|
||||
"auth": "authhttp",
|
||||
"bridge": "kafka"
|
||||
}
|
||||
}
|
||||
~~~
|
||||
|
||||
@@ -51,72 +78,47 @@ $ go run main.go
|
||||
|
||||
* Supports will messages
|
||||
|
||||
* Queue subscribe
|
||||
|
||||
* Websocket Support
|
||||
|
||||
* TLS/SSL Support
|
||||
|
||||
* Flexible ACL
|
||||
* Auth Support
|
||||
* Auth Connect
|
||||
* Auth ACL
|
||||
* Cache Support
|
||||
|
||||
### QUEUE SUBSCRIBE
|
||||
* Kafka Bridge Support
|
||||
* Action Deliver
|
||||
* Regexp Deliver
|
||||
|
||||
* HTTP API
|
||||
* Disconnect Connect (future more)
|
||||
|
||||
### Share SUBSCRIBE
|
||||
~~~
|
||||
| Prefix | Examples |
|
||||
| ------------- |---------------------------------|
|
||||
| $queue/ | mosquitto_sub -t ‘$queue/topic’ |
|
||||
| Prefix | Examples | Publish |
|
||||
| ------------------- |-------------------------------------------|--------------------------- --|
|
||||
| $share/<group>/topic | mosquitto_sub -t ‘$share/<group>/topic’ | mosquitto_pub -t ‘topic’ |
|
||||
~~~
|
||||
|
||||
### ACL Configure
|
||||
#### The ACL rules define:
|
||||
~~~
|
||||
Allow | type | value | pubsub | Topics
|
||||
~~~
|
||||
#### ACL Config
|
||||
~~~
|
||||
## type clientid , username, ipaddr
|
||||
##pub 1 , sub 2, pubsub 3
|
||||
## %c is clientid , %u is username
|
||||
allow ip 127.0.0.1 2 $SYS/#
|
||||
allow clientid 0001 3 #
|
||||
allow username admin 3 #
|
||||
allow username joy 3 /test,hello/world
|
||||
allow clientid * 1 toCloud/%c
|
||||
allow username * 1 toCloud/%u
|
||||
deny clientid * 3 #
|
||||
~~~
|
||||
### Cluster
|
||||
```bash
|
||||
1, start router for hmq (https://github.com/fhmq/router.git)
|
||||
$ go get github.com/fhmq/router
|
||||
$ cd $GOPATH/github.com/fhmq/router
|
||||
$ go run main.go
|
||||
2, config router in hmq.config ("router": "127.0.0.1:9888")
|
||||
|
||||
```
|
||||
Other Version Of Cluster Based On gRPC: [click here](https://github.com/fhmq/rhmq)
|
||||
|
||||
~~~
|
||||
#allow local sub $SYS topic
|
||||
allow ip 127.0.0.1 2 $SYS/#
|
||||
~~~
|
||||
~~~
|
||||
#allow client who's id with 0001 or username with admin pub sub all topic
|
||||
allow clientid 0001 3 #
|
||||
allow username admin 3 #
|
||||
~~~
|
||||
~~~
|
||||
#allow client with the username joy can pub sub topic '/test' and 'hello/world'
|
||||
allow username joy 3 /test,hello/world
|
||||
~~~
|
||||
~~~
|
||||
#allow all client pub the topic toCloud/{clientid/username}
|
||||
allow clientid * 1 toCloud/%c
|
||||
allow username * 1 toCloud/%u
|
||||
~~~
|
||||
~~~
|
||||
#deny all client pub sub all topic
|
||||
deny clientid * 3 #
|
||||
~~~
|
||||
Client match acl rule one by one
|
||||
~~~
|
||||
--------- --------- ---------
|
||||
Client -> | Rule1 | --nomatch--> | Rule2 | --nomatch--> | Rule3 | -->
|
||||
--------- --------- ---------
|
||||
| | |
|
||||
match match match
|
||||
\|/ \|/ \|/
|
||||
allow | deny allow | deny allow | deny
|
||||
~~~
|
||||
### Online/Offline Notification
|
||||
```bash
|
||||
topic:
|
||||
$SYS/broker/connection/clients/<clientID>
|
||||
payload:
|
||||
{"clientID":"client001","online":true/false,"timestamp":"2018-10-25T09:32:32Z"}
|
||||
```
|
||||
|
||||
## Performance
|
||||
|
||||
@@ -129,4 +131,14 @@ Client -> | Rule1 | --nomatch--> | Rule2 | --nomatch--> | Rule3 | -->
|
||||
|
||||
## License
|
||||
|
||||
* Apache License Version 2.0
|
||||
* Apache License Version 2.0
|
||||
|
||||
|
||||
## Reference
|
||||
|
||||
* Surgermq.(https://github.com/zentures/surgemq)
|
||||
|
||||
## Benchmark Tool
|
||||
|
||||
* https://github.com/inovex/mqtt-stresser
|
||||
* https://github.com/krylovsk/mqtt-benchmark
|
||||
|
||||
@@ -1,80 +1,40 @@
|
||||
package broker
|
||||
|
||||
import (
|
||||
"hmq/lib/acl"
|
||||
"strings"
|
||||
|
||||
log "github.com/cihub/seelog"
|
||||
"github.com/fsnotify/fsnotify"
|
||||
)
|
||||
|
||||
const (
|
||||
PUB = 1
|
||||
SUB = 2
|
||||
SUB = "1"
|
||||
PUB = "2"
|
||||
)
|
||||
|
||||
func (c *client) CheckTopicAuth(typ int, topic string) bool {
|
||||
if c.typ != CLIENT || !c.broker.config.Acl {
|
||||
return true
|
||||
}
|
||||
if strings.HasPrefix(topic, "$queue/") {
|
||||
topic = string([]byte(topic)[7:])
|
||||
if topic == "" {
|
||||
return false
|
||||
func (b *Broker) CheckTopicAuth(action, clientID, username, ip, topic string) bool {
|
||||
if b.auth != nil {
|
||||
if strings.HasPrefix(topic, "$SYS/broker/connection/clients/") {
|
||||
return true
|
||||
}
|
||||
|
||||
if strings.HasPrefix(topic, "$share/") && action == SUB {
|
||||
substr := groupCompile.FindStringSubmatch(topic)
|
||||
if len(substr) != 3 {
|
||||
return false
|
||||
}
|
||||
topic = substr[2]
|
||||
}
|
||||
|
||||
return b.auth.CheckACL(action, clientID, username, ip, topic)
|
||||
}
|
||||
ip := c.info.remoteIP
|
||||
username := string(c.info.username)
|
||||
clientid := string(c.info.clientID)
|
||||
aclInfo := c.broker.AclConfig
|
||||
return acl.CheckTopicAuth(aclInfo, typ, ip, username, clientid, topic)
|
||||
|
||||
return true
|
||||
|
||||
}
|
||||
|
||||
var (
|
||||
watchList = []string{"./conf"}
|
||||
)
|
||||
|
||||
func (b *Broker) handleFsEvent(event fsnotify.Event) error {
|
||||
switch event.Name {
|
||||
case b.config.AclConf:
|
||||
if event.Op&fsnotify.Write == fsnotify.Write ||
|
||||
event.Op&fsnotify.Create == fsnotify.Create {
|
||||
log.Info("text:handling acl config change event:", event)
|
||||
aclconfig, err := acl.AclConfigLoad(event.Name)
|
||||
if err != nil {
|
||||
log.Error("aclconfig change failed, load acl conf error: ", err)
|
||||
return err
|
||||
}
|
||||
b.AclConfig = aclconfig
|
||||
}
|
||||
func (b *Broker) CheckConnectAuth(clientID, username, password string) bool {
|
||||
if b.auth != nil {
|
||||
return b.auth.CheckConnect(clientID, username, password)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Broker) StartAclWatcher() {
|
||||
go func() {
|
||||
wch, e := fsnotify.NewWatcher()
|
||||
if e != nil {
|
||||
log.Error("start monitor acl config file error,", e)
|
||||
return
|
||||
}
|
||||
defer wch.Close()
|
||||
return true
|
||||
|
||||
for _, i := range watchList {
|
||||
if err := wch.Add(i); err != nil {
|
||||
log.Error("start monitor acl config file error,", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
log.Info("watching acl config file change...")
|
||||
for {
|
||||
select {
|
||||
case evt := <-wch.Events:
|
||||
b.handleFsEvent(evt)
|
||||
case err := <-wch.Errors:
|
||||
log.Error("error:", err.Error())
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
17
broker/bridge.go
Normal file
17
broker/bridge.go
Normal file
@@ -0,0 +1,17 @@
|
||||
package broker
|
||||
|
||||
import (
|
||||
"github.com/fhmq/hmq/plugins/bridge"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func (b *Broker) Publish(e *bridge.Elements) bool {
|
||||
if b.bridgeMQ != nil {
|
||||
cost, err := b.bridgeMQ.Publish(e)
|
||||
if err != nil {
|
||||
log.Error("send message to mq error.", zap.Error(err))
|
||||
}
|
||||
return cost
|
||||
}
|
||||
return false
|
||||
}
|
||||
779
broker/broker.go
779
broker/broker.go
File diff suppressed because it is too large
Load Diff
988
broker/client.go
988
broker/client.go
File diff suppressed because it is too large
Load Diff
208
broker/comm.go
208
broker/comm.go
@@ -1,15 +1,14 @@
|
||||
package broker
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||
uuid "github.com/google/uuid"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -46,47 +45,6 @@ const (
|
||||
QosFailure = 0x80
|
||||
)
|
||||
|
||||
func SubscribeTopicCheckAndSpilt(topic string) ([]string, error) {
|
||||
if strings.Index(topic, "#") != -1 && strings.Index(topic, "#") != len(topic)-1 {
|
||||
return nil, errors.New("Topic format error with index of #")
|
||||
}
|
||||
re := strings.Split(topic, "/")
|
||||
for i, v := range re {
|
||||
if i != 0 && i != (len(re)-1) {
|
||||
if v == "" {
|
||||
return nil, errors.New("Topic format error with index of //")
|
||||
}
|
||||
if strings.Contains(v, "+") && v != "+" {
|
||||
return nil, errors.New("Topic format error with index of +")
|
||||
}
|
||||
} else {
|
||||
if v == "" {
|
||||
re[i] = "/"
|
||||
}
|
||||
}
|
||||
}
|
||||
return re, nil
|
||||
|
||||
}
|
||||
|
||||
func PublishTopicCheckAndSpilt(topic string) ([]string, error) {
|
||||
if strings.Index(topic, "#") != -1 || strings.Index(topic, "+") != -1 {
|
||||
return nil, errors.New("Publish Topic format error with + and #")
|
||||
}
|
||||
re := strings.Split(topic, "/")
|
||||
for i, v := range re {
|
||||
if v == "" {
|
||||
if i != 0 && i != (len(re)-1) {
|
||||
return nil, errors.New("Topic format error with index of //")
|
||||
} else {
|
||||
re[i] = "/"
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return re, nil
|
||||
}
|
||||
|
||||
func equal(k1, k2 interface{}) bool {
|
||||
if reflect.TypeOf(k1) != reflect.TypeOf(k2) {
|
||||
return false
|
||||
@@ -132,13 +90,151 @@ func equal(k1, k2 interface{}) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func GenUniqueId() string {
|
||||
b := make([]byte, 48)
|
||||
if _, err := io.ReadFull(rand.Reader, b); err != nil {
|
||||
return ""
|
||||
func addSubMap(m map[string]uint64, topic string) {
|
||||
subNum, exist := m[topic]
|
||||
if exist {
|
||||
m[topic] = subNum + 1
|
||||
} else {
|
||||
m[topic] = 1
|
||||
}
|
||||
h := md5.New()
|
||||
h.Write([]byte(base64.URLEncoding.EncodeToString(b)))
|
||||
return hex.EncodeToString(h.Sum(nil))
|
||||
// return GetMd5String()
|
||||
}
|
||||
|
||||
func delSubMap(m map[string]uint64, topic string) uint64 {
|
||||
subNum, exist := m[topic]
|
||||
if exist {
|
||||
if subNum > 1 {
|
||||
m[topic] = subNum - 1
|
||||
return subNum - 1
|
||||
}
|
||||
} else {
|
||||
m[topic] = 0
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func GenUniqueId() string {
|
||||
id, err := uuid.NewRandom()
|
||||
if err != nil {
|
||||
log.Error("uuid.NewRandom() returned an error: " + err.Error())
|
||||
}
|
||||
return id.String()
|
||||
}
|
||||
|
||||
func wrapPublishPacket(packet *packets.PublishPacket) *packets.PublishPacket {
|
||||
p := packet.Copy()
|
||||
wrapPayload := map[string]interface{}{
|
||||
"message_id": GenUniqueId(),
|
||||
"payload": string(p.Payload),
|
||||
}
|
||||
b, _ := json.Marshal(wrapPayload)
|
||||
p.Payload = b
|
||||
return p
|
||||
}
|
||||
|
||||
func unWrapPublishPacket(packet *packets.PublishPacket) *packets.PublishPacket {
|
||||
p := packet.Copy()
|
||||
if payload := jsoniter.Get(p.Payload, "payload").ToString(); payload != "" {
|
||||
p.Payload = []byte(payload)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func publish(sub *subscription, packet *packets.PublishPacket) {
|
||||
switch packet.Qos {
|
||||
case QosAtMostOnce:
|
||||
err := sub.client.WriterPacket(packet)
|
||||
if err != nil {
|
||||
log.Error("process message for psub error, ", zap.Error(err))
|
||||
}
|
||||
case QosAtLeastOnce, QosExactlyOnce:
|
||||
sub.client.inflightMu.Lock()
|
||||
sub.client.inflight[packet.MessageID] = &inflightElem{status: Publish, packet: packet, timestamp: time.Now().Unix()}
|
||||
sub.client.inflightMu.Unlock()
|
||||
err := sub.client.WriterPacket(packet)
|
||||
if err != nil {
|
||||
log.Error("process message for psub error, ", zap.Error(err))
|
||||
}
|
||||
sub.client.ensureRetryTimer()
|
||||
default:
|
||||
log.Error("publish with unknown qos", zap.String("ClientID", sub.client.info.clientID))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// timer for retry delivery
|
||||
func (c *client) ensureRetryTimer(interval ...int64) {
|
||||
|
||||
c.retryTimerLock.Lock()
|
||||
defer c.retryTimerLock.Unlock()
|
||||
|
||||
if c.retryTimer != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(interval) > 1 {
|
||||
return
|
||||
}
|
||||
timerInterval := retryInterval
|
||||
if len(interval) == 1 {
|
||||
timerInterval = interval[0]
|
||||
}
|
||||
|
||||
c.retryTimer = time.AfterFunc(time.Duration(timerInterval)*time.Second, c.retryDelivery)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *client) resetRetryTimer() {
|
||||
// lock mutex before reading retryTimer
|
||||
c.retryTimerLock.Lock()
|
||||
defer c.retryTimerLock.Unlock()
|
||||
|
||||
if c.retryTimer == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// reset timer
|
||||
c.retryTimer = nil
|
||||
}
|
||||
|
||||
func (c *client) retryDelivery() {
|
||||
c.resetRetryTimer()
|
||||
c.inflightMu.RLock()
|
||||
ilen := len(c.inflight)
|
||||
|
||||
if c.mu.Lock(); c.conn == nil || ilen == 0 { //Reset timer when client offline OR inflight is empty
|
||||
c.inflightMu.RUnlock()
|
||||
c.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// copy the to be retried elements out of the map to only hold the lock for a short time and use the new slice later to iterate
|
||||
// through them
|
||||
toRetryEle := make([]*inflightElem, 0, ilen)
|
||||
for _, infEle := range c.inflight {
|
||||
toRetryEle = append(toRetryEle, infEle)
|
||||
}
|
||||
c.inflightMu.RUnlock()
|
||||
now := time.Now().Unix()
|
||||
|
||||
for _, infEle := range toRetryEle {
|
||||
age := now - infEle.timestamp
|
||||
if age >= retryInterval {
|
||||
if infEle.status == Publish {
|
||||
c.WriterPacket(infEle.packet)
|
||||
infEle.timestamp = now
|
||||
} else if infEle.status == Pubrel {
|
||||
pubrel := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket)
|
||||
pubrel.MessageID = infEle.packet.MessageID
|
||||
c.WriterPacket(pubrel)
|
||||
infEle.timestamp = now
|
||||
}
|
||||
} else {
|
||||
if age < 0 {
|
||||
age = 0
|
||||
}
|
||||
c.ensureRetryTimer(retryInterval - age)
|
||||
}
|
||||
}
|
||||
c.ensureRetryTimer()
|
||||
}
|
||||
|
||||
188
broker/config.go
188
broker/config.go
@@ -3,36 +3,51 @@ package broker
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
log "github.com/cihub/seelog"
|
||||
"github.com/fhmq/hmq/logger"
|
||||
"github.com/fhmq/hmq/plugins/auth"
|
||||
"github.com/fhmq/hmq/plugins/bridge"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const (
|
||||
CONFIGFILE = "conf/hmq.config"
|
||||
)
|
||||
var json = jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
|
||||
type Config struct {
|
||||
Worker int `json:"workerNum"`
|
||||
Host string `json:"host"`
|
||||
Port string `json:"port"`
|
||||
Cluster RouteInfo `json:"cluster"`
|
||||
TlsHost string `json:"tlsHost"`
|
||||
TlsPort string `json:"tlsPort"`
|
||||
WsPath string `json:"wsPath"`
|
||||
WsPort string `json:"wsPort"`
|
||||
WsTLS bool `json:"wsTLS"`
|
||||
TlsInfo TLSInfo `json:"tlsInfo"`
|
||||
Acl bool `json:"acl"`
|
||||
AclConf string `json:"aclConf"`
|
||||
Worker int `json:"workerNum"`
|
||||
HTTPPort string `json:"httpPort"`
|
||||
Host string `json:"host"`
|
||||
V5Port string `json:"v5Port"`
|
||||
Port string `json:"port"`
|
||||
Cluster RouteInfo `json:"cluster"`
|
||||
Router string `json:"router"`
|
||||
TlsHost string `json:"tlsHost"`
|
||||
TlsPort string `json:"tlsPort"`
|
||||
WsPath string `json:"wsPath"`
|
||||
WsPort string `json:"wsPort"`
|
||||
WsTLS bool `json:"wsTLS"`
|
||||
TlsInfo TLSInfo `json:"tlsInfo"`
|
||||
Debug bool `json:"debug"`
|
||||
Plugin Plugins `json:"plugins"`
|
||||
}
|
||||
|
||||
type Plugins struct {
|
||||
Auth auth.Auth
|
||||
Bridge bridge.BridgeMQ
|
||||
}
|
||||
|
||||
type NamedPlugins struct {
|
||||
Auth string
|
||||
Bridge string
|
||||
}
|
||||
|
||||
type RouteInfo struct {
|
||||
Host string `json:"host"`
|
||||
Port string `json:"port"`
|
||||
Routes []string `json:"routes"`
|
||||
Host string `json:"host"`
|
||||
Port string `json:"port"`
|
||||
}
|
||||
|
||||
type TLSInfo struct {
|
||||
@@ -42,11 +57,98 @@ type TLSInfo struct {
|
||||
KeyFile string `json:"keyFile"`
|
||||
}
|
||||
|
||||
func LoadConfig() (*Config, error) {
|
||||
var DefaultConfig *Config = &Config{
|
||||
Worker: 4096,
|
||||
Host: "0.0.0.0",
|
||||
Port: "1883",
|
||||
}
|
||||
|
||||
content, err := ioutil.ReadFile(CONFIGFILE)
|
||||
var (
|
||||
log = logger.Prod().Named("broker")
|
||||
)
|
||||
|
||||
func showHelp() {
|
||||
fmt.Printf("%s\n", usageStr)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
func ConfigureConfig(args []string) (*Config, error) {
|
||||
config := &Config{}
|
||||
var (
|
||||
help bool
|
||||
configFile string
|
||||
)
|
||||
fs := flag.NewFlagSet("hmq-broker", flag.ExitOnError)
|
||||
fs.Usage = showHelp
|
||||
|
||||
fs.BoolVar(&help, "h", false, "Show this message.")
|
||||
fs.BoolVar(&help, "help", false, "Show this message.")
|
||||
fs.IntVar(&config.Worker, "w", 1024, "worker num to process message, perfer (client num)/10.")
|
||||
fs.IntVar(&config.Worker, "worker", 1024, "worker num to process message, perfer (client num)/10.")
|
||||
fs.StringVar(&config.HTTPPort, "httpport", "8080", "Port to listen on.")
|
||||
fs.StringVar(&config.HTTPPort, "hp", "8080", "Port to listen on.")
|
||||
fs.StringVar(&config.Port, "port", "1883", "Port to listen on.")
|
||||
fs.StringVar(&config.Port, "p", "1883", "Port to listen on.")
|
||||
fs.StringVar(&config.V5Port, "v5port", "", "Port v5 to listen on.")
|
||||
fs.StringVar(&config.V5Port, "p5", "", "Port v5 to listen on.")
|
||||
fs.StringVar(&config.Host, "host", "0.0.0.0", "Network host to listen on")
|
||||
fs.StringVar(&config.Cluster.Port, "cp", "", "Cluster port from which members can connect.")
|
||||
fs.StringVar(&config.Cluster.Port, "clusterport", "", "Cluster port from which members can connect.")
|
||||
fs.StringVar(&config.Router, "r", "", "Router who maintenance cluster info")
|
||||
fs.StringVar(&config.Router, "router", "", "Router who maintenance cluster info")
|
||||
fs.StringVar(&config.WsPort, "ws", "", "port for ws to listen on")
|
||||
fs.StringVar(&config.WsPort, "wsport", "", "port for ws to listen on")
|
||||
fs.StringVar(&config.WsPath, "wsp", "", "path for ws to listen on")
|
||||
fs.StringVar(&config.WsPath, "wspath", "", "path for ws to listen on")
|
||||
fs.StringVar(&configFile, "config", "", "config file for hmq")
|
||||
fs.StringVar(&configFile, "c", "", "config file for hmq")
|
||||
fs.BoolVar(&config.Debug, "debug", false, "enable Debug logging.")
|
||||
fs.BoolVar(&config.Debug, "d", false, "enable Debug logging.")
|
||||
|
||||
fs.Bool("D", true, "enable Debug logging.")
|
||||
|
||||
if err := fs.Parse(args); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if help {
|
||||
showHelp()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
fs.Visit(func(f *flag.Flag) {
|
||||
switch f.Name {
|
||||
case "D":
|
||||
config.Debug = true
|
||||
}
|
||||
})
|
||||
|
||||
if configFile != "" {
|
||||
tmpConfig, e := LoadConfig(configFile)
|
||||
if e != nil {
|
||||
return nil, e
|
||||
} else {
|
||||
config = tmpConfig
|
||||
}
|
||||
}
|
||||
|
||||
if config.Debug {
|
||||
log = logger.Debug().Named("broker")
|
||||
}
|
||||
|
||||
if err := config.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return config, nil
|
||||
|
||||
}
|
||||
|
||||
func LoadConfig(filename string) (*Config, error) {
|
||||
|
||||
content, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
log.Error("Read config file error: ", err)
|
||||
// log.Error("Read config file error: ", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
// log.Info(string(content))
|
||||
@@ -54,16 +156,30 @@ func LoadConfig() (*Config, error) {
|
||||
var config Config
|
||||
err = json.Unmarshal(content, &config)
|
||||
if err != nil {
|
||||
log.Error("Unmarshal config file error: ", err)
|
||||
// log.Error("Unmarshal config file error: ", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &config, nil
|
||||
}
|
||||
|
||||
func (p *Plugins) UnmarshalJSON(b []byte) error {
|
||||
var named NamedPlugins
|
||||
err := json.Unmarshal(b, &named)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.Auth = auth.NewAuth(named.Auth)
|
||||
p.Bridge = bridge.NewBridgeMQ(named.Bridge)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (config *Config) check() error {
|
||||
|
||||
if config.Worker == 0 {
|
||||
config.Worker = 1024
|
||||
}
|
||||
|
||||
WorkNum = config.Worker
|
||||
|
||||
if config.Port != "" {
|
||||
if config.Host == "" {
|
||||
config.Host = "0.0.0.0"
|
||||
@@ -75,29 +191,33 @@ func LoadConfig() (*Config, error) {
|
||||
config.Cluster.Host = "0.0.0.0"
|
||||
}
|
||||
}
|
||||
if config.Router != "" {
|
||||
if config.Cluster.Port == "" {
|
||||
return errors.New("cluster port is null")
|
||||
}
|
||||
}
|
||||
|
||||
if config.TlsPort != "" {
|
||||
if config.TlsInfo.CertFile == "" || config.TlsInfo.KeyFile == "" {
|
||||
log.Error("tls config error, no cert or key file.")
|
||||
return nil, err
|
||||
return errors.New("tls config error, no cert or key file.")
|
||||
}
|
||||
if config.TlsHost == "" {
|
||||
config.TlsHost = "0.0.0.0"
|
||||
}
|
||||
}
|
||||
|
||||
return &config, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewTLSConfig(tlsInfo TLSInfo) (*tls.Config, error) {
|
||||
|
||||
cert, err := tls.LoadX509KeyPair(tlsInfo.CertFile, tlsInfo.KeyFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing X509 certificate/key pair: %v", err)
|
||||
return nil, fmt.Errorf("error parsing X509 certificate/key pair: %v", zap.Error(err))
|
||||
}
|
||||
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing certificate: %v", err)
|
||||
return nil, fmt.Errorf("error parsing certificate: %v", zap.Error(err))
|
||||
}
|
||||
|
||||
// Create TLSConfig
|
||||
@@ -113,12 +233,12 @@ func NewTLSConfig(tlsInfo TLSInfo) (*tls.Config, error) {
|
||||
}
|
||||
// Add in CAs if applicable.
|
||||
if tlsInfo.CaFile != "" {
|
||||
rootPEM, err := ioutil.ReadFile(tlsInfo.CaFile)
|
||||
rootPEM, err := os.ReadFile(tlsInfo.CaFile)
|
||||
if err != nil || rootPEM == nil {
|
||||
return nil, err
|
||||
}
|
||||
pool := x509.NewCertPool()
|
||||
ok := pool.AppendCertsFromPEM([]byte(rootPEM))
|
||||
ok := pool.AppendCertsFromPEM(rootPEM)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("failed to parse root ca certificate")
|
||||
}
|
||||
|
||||
@@ -1,44 +0,0 @@
|
||||
package broker
|
||||
|
||||
var WorkNum int
|
||||
|
||||
type Dispatcher struct {
|
||||
WorkerPool chan chan *Message
|
||||
}
|
||||
|
||||
func StartDispatcher() {
|
||||
InitMessagePool()
|
||||
dispatcher := NewDispatcher()
|
||||
dispatcher.Run()
|
||||
}
|
||||
|
||||
func (d *Dispatcher) Run() {
|
||||
// starting n number of workers
|
||||
for i := 0; i < WorkNum; i++ {
|
||||
worker := NewWorker(d.WorkerPool)
|
||||
worker.Start()
|
||||
}
|
||||
go d.dispatch()
|
||||
}
|
||||
|
||||
func NewDispatcher() *Dispatcher {
|
||||
pool := make(chan chan *Message, WorkNum)
|
||||
return &Dispatcher{WorkerPool: pool}
|
||||
}
|
||||
|
||||
func (d *Dispatcher) dispatch() {
|
||||
for i := 0; i < (MessagePoolNum + 2); i++ {
|
||||
go func(idx int) {
|
||||
for {
|
||||
select {
|
||||
case msg := <-MSGPool[idx].queue:
|
||||
go func(msg *Message) {
|
||||
msgChannel := <-d.WorkerPool
|
||||
msgChannel <- msg
|
||||
}(msg)
|
||||
}
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
}
|
||||
65
broker/http.go
Normal file
65
broker/http.go
Normal file
@@ -0,0 +1,65 @@
|
||||
package broker
|
||||
|
||||
import (
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
const (
|
||||
CONNECTIONS = "api/v1/connections"
|
||||
)
|
||||
|
||||
type ConnClient struct {
|
||||
Info `json:"info"`
|
||||
LastMsgTime int64 `json:"lastMsg"`
|
||||
}
|
||||
|
||||
type resp struct {
|
||||
Code int `json:"code,omitempty"`
|
||||
Clients []ConnClient `json:"clients,omitempty"`
|
||||
}
|
||||
|
||||
func InitHTTPMoniter(b *Broker) {
|
||||
gin.SetMode(gin.ReleaseMode)
|
||||
router := gin.Default()
|
||||
router.DELETE(CONNECTIONS + "/:clientid", func(c *gin.Context) {
|
||||
clientid := c.Param("clientid")
|
||||
cli, ok := b.clients.Load(clientid)
|
||||
if ok {
|
||||
conn, success := cli.(*client)
|
||||
if success {
|
||||
conn.Close()
|
||||
}
|
||||
}
|
||||
r := resp{Code: 0}
|
||||
c.JSON(200, &r)
|
||||
})
|
||||
router.GET(CONNECTIONS, func(c *gin.Context) {
|
||||
conns := make([]ConnClient, 0)
|
||||
b.clients.Range(func (k, v interface{}) bool {
|
||||
cl, _ := v.(*client)
|
||||
var pubPack = PubPacket{}
|
||||
if cl.info.willMsg != nil {
|
||||
pubPack.TopicName = cl.info.willMsg.TopicName
|
||||
pubPack.Payload = cl.info.willMsg.Payload
|
||||
}
|
||||
|
||||
msg := ConnClient{
|
||||
Info: Info{
|
||||
ClientID: cl.info.clientID,
|
||||
Username: cl.info.username,
|
||||
Password: cl.info.password,
|
||||
Keepalive: cl.info.keepalive,
|
||||
WillMsg: pubPack,
|
||||
},
|
||||
LastMsgTime: cl.lastMsgTime,
|
||||
}
|
||||
|
||||
conns = append(conns, msg)
|
||||
return true
|
||||
})
|
||||
r := resp{Clients: conns}
|
||||
c.JSON(200, &r)
|
||||
})
|
||||
|
||||
router.Run(":" + b.config.HTTPPort)
|
||||
}
|
||||
@@ -4,10 +4,9 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||
|
||||
simplejson "github.com/bitly/go-simplejson"
|
||||
log "github.com/cihub/seelog"
|
||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func (c *client) SendInfo() {
|
||||
@@ -16,29 +15,27 @@ func (c *client) SendInfo() {
|
||||
}
|
||||
url := c.info.localIP + ":" + c.broker.config.Cluster.Port
|
||||
|
||||
infoMsg := NewInfo(c.broker.id, url, false)
|
||||
infoMsg := NewInfo(c.broker.id, url)
|
||||
err := c.WriterPacket(infoMsg)
|
||||
if err != nil {
|
||||
log.Error("send info message error, ", err)
|
||||
log.Error("send info message error, ", zap.Error(err))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (c *client) StartPing() {
|
||||
timeTicker := time.NewTicker(time.Second * 30)
|
||||
timeTicker := time.NewTicker(time.Second * 50)
|
||||
ping := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket)
|
||||
for {
|
||||
select {
|
||||
case <-timeTicker.C:
|
||||
err := c.WriterPacket(ping)
|
||||
if err != nil {
|
||||
log.Error("ping error: ", err)
|
||||
log.Error("ping error: ", zap.Error(err))
|
||||
c.Close()
|
||||
}
|
||||
case _, ok := <-c.closed:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
case <-c.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -49,25 +46,26 @@ func (c *client) SendConnect() {
|
||||
return
|
||||
}
|
||||
m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
|
||||
m.ProtocolName = "MQIsdp"
|
||||
m.ProtocolVersion = 3
|
||||
|
||||
m.CleanSession = true
|
||||
m.ClientIdentifier = c.info.clientID
|
||||
m.Keepalive = uint16(60)
|
||||
err := c.WriterPacket(m)
|
||||
if err != nil {
|
||||
log.Error("send connect message error, ", err)
|
||||
log.Error("send connect message error, ", zap.Error(err))
|
||||
return
|
||||
}
|
||||
log.Info("send connect success")
|
||||
}
|
||||
|
||||
func NewInfo(sid, url string, isforword bool) *packets.PublishPacket {
|
||||
func NewInfo(sid, url string) *packets.PublishPacket {
|
||||
pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
pub.Qos = 0
|
||||
pub.TopicName = BrokerInfoTopic
|
||||
pub.Retain = false
|
||||
info := fmt.Sprintf(`{"remoteID":"%s","url":"%s","isForward":%t}`, sid, url, isforword)
|
||||
// log.Info("new info", string(info))
|
||||
info := fmt.Sprintf(`{"brokerID":"%s","brokerUrl":"%s"}`, sid, url)
|
||||
pub.Payload = []byte(info)
|
||||
return pub
|
||||
}
|
||||
@@ -79,51 +77,36 @@ func (c *client) ProcessInfo(packet *packets.PublishPacket) {
|
||||
return
|
||||
}
|
||||
|
||||
log.Info("recv remoteInfo: ", string(packet.Payload))
|
||||
log.Info("recv remoteInfo: ", zap.String("payload", string(packet.Payload)))
|
||||
|
||||
js, e := simplejson.NewJson(packet.Payload)
|
||||
if e != nil {
|
||||
log.Warn("parse info message err", e)
|
||||
js, err := simplejson.NewJson(packet.Payload)
|
||||
if err != nil {
|
||||
log.Warn("parse info message err", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
rid := js.Get("remoteID").MustString()
|
||||
rurl := js.Get("url").MustString()
|
||||
isForward := js.Get("isForward").MustBool()
|
||||
|
||||
if rid == "" {
|
||||
log.Error("receive info message error with remoteID is null")
|
||||
routes, err := js.Get("data").Map()
|
||||
if routes == nil {
|
||||
log.Error("receive info message error, ", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
if rid == b.id {
|
||||
if !isForward {
|
||||
c.Close() //close connet self
|
||||
}
|
||||
return
|
||||
}
|
||||
b.nodes = routes
|
||||
|
||||
b.mu.Lock()
|
||||
exist := b.CheckRemoteExist(rid, rurl)
|
||||
if !exist {
|
||||
b.connectRouter(rurl, rid)
|
||||
}
|
||||
b.mu.Unlock()
|
||||
|
||||
if !isForward {
|
||||
if c.typ == ROUTER {
|
||||
route := route{
|
||||
remoteUrl: rurl,
|
||||
remoteID: rid,
|
||||
}
|
||||
c.route = route
|
||||
for rid, rurl := range routes {
|
||||
if rid == b.id {
|
||||
continue
|
||||
}
|
||||
|
||||
go b.SendLocalSubsToRouter(c)
|
||||
// log.Info("BroadcastInfoMessage starting... ")
|
||||
infoMsg := NewInfo(rid, rurl, true)
|
||||
b.BroadcastInfoMessage(rid, infoMsg)
|
||||
}
|
||||
url, ok := rurl.(string)
|
||||
if ok {
|
||||
exist := b.CheckRemoteExist(rid, url)
|
||||
if !exist {
|
||||
b.connectRouter(rid, url)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
62
broker/lib/sessions/memprovider.go
Normal file
62
broker/lib/sessions/memprovider.go
Normal file
@@ -0,0 +1,62 @@
|
||||
package sessions
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var _ SessionsProvider = (*memProvider)(nil)
|
||||
|
||||
func init() {
|
||||
Register("mem", NewMemProvider())
|
||||
}
|
||||
|
||||
type memProvider struct {
|
||||
st map[string]*Session
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func NewMemProvider() *memProvider {
|
||||
return &memProvider{
|
||||
st: make(map[string]*Session),
|
||||
}
|
||||
}
|
||||
|
||||
func (this *memProvider) New(id string) (*Session, error) {
|
||||
this.mu.Lock()
|
||||
defer this.mu.Unlock()
|
||||
|
||||
this.st[id] = &Session{id: id}
|
||||
return this.st[id], nil
|
||||
}
|
||||
|
||||
func (this *memProvider) Get(id string) (*Session, error) {
|
||||
this.mu.RLock()
|
||||
defer this.mu.RUnlock()
|
||||
|
||||
sess, ok := this.st[id]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("store/Get: No session found for key %s", id)
|
||||
}
|
||||
|
||||
return sess, nil
|
||||
}
|
||||
|
||||
func (this *memProvider) Del(id string) {
|
||||
this.mu.Lock()
|
||||
defer this.mu.Unlock()
|
||||
delete(this.st, id)
|
||||
}
|
||||
|
||||
func (this *memProvider) Save(id string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *memProvider) Count() int {
|
||||
return len(this.st)
|
||||
}
|
||||
|
||||
func (this *memProvider) Close() error {
|
||||
this.st = make(map[string]*Session)
|
||||
return nil
|
||||
}
|
||||
149
broker/lib/sessions/session.go
Normal file
149
broker/lib/sessions/session.go
Normal file
@@ -0,0 +1,149 @@
|
||||
package sessions
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||
)
|
||||
|
||||
const (
|
||||
// Queue size for the ack queue
|
||||
defaultQueueSize = 16
|
||||
)
|
||||
|
||||
type Session struct {
|
||||
|
||||
// cmsg is the CONNECT message
|
||||
cmsg *packets.ConnectPacket
|
||||
|
||||
// Will message to publish if connect is closed unexpectedly
|
||||
Will *packets.PublishPacket
|
||||
|
||||
// Retained publish message
|
||||
Retained *packets.PublishPacket
|
||||
|
||||
// topics stores all the topis for this session/client
|
||||
topics map[string]byte
|
||||
|
||||
// Initialized?
|
||||
initted bool
|
||||
|
||||
// Serialize access to this session
|
||||
mu sync.Mutex
|
||||
|
||||
id string
|
||||
}
|
||||
|
||||
func (this *Session) Init(msg *packets.ConnectPacket) error {
|
||||
this.mu.Lock()
|
||||
defer this.mu.Unlock()
|
||||
|
||||
if this.initted {
|
||||
return fmt.Errorf("Session already initialized")
|
||||
}
|
||||
|
||||
this.cmsg = msg
|
||||
|
||||
if this.cmsg.WillFlag {
|
||||
this.Will = packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
this.Will.Qos = this.cmsg.Qos
|
||||
this.Will.TopicName = this.cmsg.WillTopic
|
||||
this.Will.Payload = this.cmsg.WillMessage
|
||||
this.Will.Retain = this.cmsg.WillRetain
|
||||
}
|
||||
|
||||
this.topics = make(map[string]byte, 1)
|
||||
|
||||
this.id = msg.ClientIdentifier
|
||||
|
||||
this.initted = true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *Session) Update(msg *packets.ConnectPacket) error {
|
||||
this.mu.Lock()
|
||||
defer this.mu.Unlock()
|
||||
|
||||
this.cmsg = msg
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *Session) RetainMessage(msg *packets.PublishPacket) error {
|
||||
this.mu.Lock()
|
||||
defer this.mu.Unlock()
|
||||
|
||||
this.Retained = msg
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *Session) AddTopic(topic string, qos byte) error {
|
||||
this.mu.Lock()
|
||||
defer this.mu.Unlock()
|
||||
|
||||
if !this.initted {
|
||||
return fmt.Errorf("Session not yet initialized")
|
||||
}
|
||||
|
||||
this.topics[topic] = qos
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *Session) RemoveTopic(topic string) error {
|
||||
this.mu.Lock()
|
||||
defer this.mu.Unlock()
|
||||
|
||||
if !this.initted {
|
||||
return fmt.Errorf("Session not yet initialized")
|
||||
}
|
||||
|
||||
delete(this.topics, topic)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *Session) Topics() ([]string, []byte, error) {
|
||||
this.mu.Lock()
|
||||
defer this.mu.Unlock()
|
||||
|
||||
if !this.initted {
|
||||
return nil, nil, fmt.Errorf("Session not yet initialized")
|
||||
}
|
||||
|
||||
var (
|
||||
topics []string
|
||||
qoss []byte
|
||||
)
|
||||
|
||||
for k, v := range this.topics {
|
||||
topics = append(topics, k)
|
||||
qoss = append(qoss, v)
|
||||
}
|
||||
|
||||
return topics, qoss, nil
|
||||
}
|
||||
|
||||
func (this *Session) ID() string {
|
||||
return this.cmsg.ClientIdentifier
|
||||
}
|
||||
|
||||
func (this *Session) WillFlag() bool {
|
||||
this.mu.Lock()
|
||||
defer this.mu.Unlock()
|
||||
return this.cmsg.WillFlag
|
||||
}
|
||||
|
||||
func (this *Session) SetWillFlag(v bool) {
|
||||
this.mu.Lock()
|
||||
defer this.mu.Unlock()
|
||||
this.cmsg.WillFlag = v
|
||||
}
|
||||
|
||||
func (this *Session) CleanSession() bool {
|
||||
this.mu.Lock()
|
||||
defer this.mu.Unlock()
|
||||
return this.cmsg.CleanSession
|
||||
}
|
||||
92
broker/lib/sessions/sessions.go
Normal file
92
broker/lib/sessions/sessions.go
Normal file
@@ -0,0 +1,92 @@
|
||||
package sessions
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrSessionsProviderNotFound = errors.New("Session: Session provider not found")
|
||||
ErrKeyNotAvailable = errors.New("Session: not item found for key.")
|
||||
|
||||
providers = make(map[string]SessionsProvider)
|
||||
)
|
||||
|
||||
type SessionsProvider interface {
|
||||
New(id string) (*Session, error)
|
||||
Get(id string) (*Session, error)
|
||||
Del(id string)
|
||||
Save(id string) error
|
||||
Count() int
|
||||
Close() error
|
||||
}
|
||||
|
||||
// Register makes a session provider available by the provided name.
|
||||
// If a Register is called twice with the same name or if the driver is nil,
|
||||
// it panics.
|
||||
func Register(name string, provider SessionsProvider) {
|
||||
if provider == nil {
|
||||
panic("session: Register provide is nil")
|
||||
}
|
||||
|
||||
if _, dup := providers[name]; dup {
|
||||
panic("session: Register called twice for provider " + name)
|
||||
}
|
||||
|
||||
providers[name] = provider
|
||||
}
|
||||
|
||||
func Unregister(name string) {
|
||||
delete(providers, name)
|
||||
}
|
||||
|
||||
type Manager struct {
|
||||
p SessionsProvider
|
||||
}
|
||||
|
||||
func NewManager(providerName string) (*Manager, error) {
|
||||
p, ok := providers[providerName]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("session: unknown provider %q", providerName)
|
||||
}
|
||||
|
||||
return &Manager{p: p}, nil
|
||||
}
|
||||
|
||||
func (this *Manager) New(id string) (*Session, error) {
|
||||
if id == "" {
|
||||
id = this.sessionId()
|
||||
}
|
||||
return this.p.New(id)
|
||||
}
|
||||
|
||||
func (this *Manager) Get(id string) (*Session, error) {
|
||||
return this.p.Get(id)
|
||||
}
|
||||
|
||||
func (this *Manager) Del(id string) {
|
||||
this.p.Del(id)
|
||||
}
|
||||
|
||||
func (this *Manager) Save(id string) error {
|
||||
return this.p.Save(id)
|
||||
}
|
||||
|
||||
func (this *Manager) Count() int {
|
||||
return this.p.Count()
|
||||
}
|
||||
|
||||
func (this *Manager) Close() error {
|
||||
return this.p.Close()
|
||||
}
|
||||
|
||||
func (manager *Manager) sessionId() string {
|
||||
b := make([]byte, 15)
|
||||
if _, err := io.ReadFull(rand.Reader, b); err != nil {
|
||||
return ""
|
||||
}
|
||||
return base64.URLEncoding.EncodeToString(b)
|
||||
}
|
||||
550
broker/lib/topics/memtopics.go
Normal file
550
broker/lib/topics/memtopics.go
Normal file
@@ -0,0 +1,550 @@
|
||||
package topics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||
)
|
||||
|
||||
// MQTT QoS levels, plus the failure return code carried in a SUBACK when a
// subscription is rejected.
const (
	QosAtMostOnce  byte = iota // QoS 0
	QosAtLeastOnce             // QoS 1
	QosExactlyOnce             // QoS 2
	QosFailure     = 0x80      // SUBACK failure return code
)
|
||||
|
||||
var _ TopicsProvider = (*memTopics)(nil)
|
||||
|
||||
type memTopics struct {
|
||||
// Sub/unsub mutex
|
||||
smu sync.RWMutex
|
||||
// Subscription tree
|
||||
sroot *snode
|
||||
|
||||
// Retained message mutex
|
||||
rmu sync.RWMutex
|
||||
// Retained messages topic tree
|
||||
rroot *rnode
|
||||
}
|
||||
|
||||
func init() {
|
||||
Register("mem", NewMemProvider())
|
||||
}
|
||||
|
||||
// NewMemProvider returns an new instance of the memTopics, which is implements the
|
||||
// TopicsProvider interface. memProvider is a hidden struct that stores the topic
|
||||
// subscriptions and retained messages in memory. The content is not persistend so
|
||||
// when the server goes, everything will be gone. Use with care.
|
||||
func NewMemProvider() *memTopics {
|
||||
return &memTopics{
|
||||
sroot: newSNode(),
|
||||
rroot: newRNode(),
|
||||
}
|
||||
}
|
||||
|
||||
func ValidQos(qos byte) bool {
|
||||
return qos == QosAtMostOnce || qos == QosAtLeastOnce || qos == QosExactlyOnce
|
||||
}
|
||||
|
||||
func (this *memTopics) Subscribe(topic []byte, qos byte, sub interface{}) (byte, error) {
|
||||
if !ValidQos(qos) {
|
||||
return QosFailure, fmt.Errorf("Invalid QoS %d", qos)
|
||||
}
|
||||
|
||||
if sub == nil {
|
||||
return QosFailure, fmt.Errorf("Subscriber cannot be nil")
|
||||
}
|
||||
|
||||
this.smu.Lock()
|
||||
defer this.smu.Unlock()
|
||||
|
||||
if qos > QosExactlyOnce {
|
||||
qos = QosExactlyOnce
|
||||
}
|
||||
|
||||
if err := this.sroot.sinsert(topic, qos, sub); err != nil {
|
||||
return QosFailure, err
|
||||
}
|
||||
|
||||
return qos, nil
|
||||
}
|
||||
|
||||
func (this *memTopics) Unsubscribe(topic []byte, sub interface{}) error {
|
||||
this.smu.Lock()
|
||||
defer this.smu.Unlock()
|
||||
|
||||
return this.sroot.sremove(topic, sub)
|
||||
}
|
||||
|
||||
// Subscribers Returned values will be invalidated by the next Subscribers call
|
||||
func (this *memTopics) Subscribers(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error {
|
||||
if !ValidQos(qos) {
|
||||
return fmt.Errorf("Invalid QoS %d", qos)
|
||||
}
|
||||
|
||||
this.smu.RLock()
|
||||
defer this.smu.RUnlock()
|
||||
|
||||
*subs = (*subs)[0:0]
|
||||
*qoss = (*qoss)[0:0]
|
||||
|
||||
return this.sroot.smatch(topic, qos, subs, qoss)
|
||||
}
|
||||
|
||||
func (this *memTopics) Retain(msg *packets.PublishPacket) error {
|
||||
this.rmu.Lock()
|
||||
defer this.rmu.Unlock()
|
||||
|
||||
// So apparently, at least according to the MQTT Conformance/Interoperability
|
||||
// Testing, that a payload of 0 means delete the retain message.
|
||||
// https://eclipse.org/paho/clients/testing/
|
||||
if len(msg.Payload) == 0 {
|
||||
return this.rroot.rremove([]byte(msg.TopicName))
|
||||
}
|
||||
|
||||
return this.rroot.rinsertOrUpdate([]byte(msg.TopicName), msg)
|
||||
}
|
||||
|
||||
func (this *memTopics) Retained(topic []byte, msgs *[]*packets.PublishPacket) error {
|
||||
this.rmu.RLock()
|
||||
defer this.rmu.RUnlock()
|
||||
|
||||
return this.rroot.rmatch(topic, msgs)
|
||||
}
|
||||
|
||||
func (this *memTopics) Close() error {
|
||||
this.sroot = nil
|
||||
this.rroot = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Subscription tree nodes.

// snode is one level of the subscription tree.
type snode struct {
	// subs/qos hold, in parallel, the subscribers (and their QoS)
	// whose topic terminates at this node.
	subs []interface{}
	qos  []byte

	// snodes holds the children, keyed by the next topic level.
	snodes map[string]*snode
}

// newSNode returns an empty subscription-tree node.
func newSNode() *snode {
	return &snode{snodes: make(map[string]*snode)}
}
|
||||
|
||||
func (this *snode) sinsert(topic []byte, qos byte, sub interface{}) error {
|
||||
// If there's no more topic levels, that means we are at the matching snode
|
||||
// to insert the subscriber. So let's see if there's such subscriber,
|
||||
// if so, update it. Otherwise insert it.
|
||||
if len(topic) == 0 {
|
||||
// Let's see if the subscriber is already on the list. If yes, update
|
||||
// QoS and then return.
|
||||
for i := range this.subs {
|
||||
if equal(this.subs[i], sub) {
|
||||
this.qos[i] = qos
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise add.
|
||||
this.subs = append(this.subs, sub)
|
||||
this.qos = append(this.qos, qos)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Not the last level, so let's find or create the next level snode, and
|
||||
// recursively call it's insert().
|
||||
|
||||
// ntl = next topic level
|
||||
ntl, rem, err := nextTopicLevel(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
level := string(ntl)
|
||||
|
||||
// Add snode if it doesn't already exist
|
||||
n, ok := this.snodes[level]
|
||||
if !ok {
|
||||
n = newSNode()
|
||||
this.snodes[level] = n
|
||||
}
|
||||
|
||||
return n.sinsert(rem, qos, sub)
|
||||
}
|
||||
|
||||
// This remove implementation ignores the QoS, as long as the subscriber
|
||||
// matches then it's removed
|
||||
func (this *snode) sremove(topic []byte, sub interface{}) error {
|
||||
// If the topic is empty, it means we are at the final matching snode. If so,
|
||||
// let's find the matching subscribers and remove them.
|
||||
if len(topic) == 0 {
|
||||
// If subscriber == nil, then it's signal to remove ALL subscribers
|
||||
if sub == nil {
|
||||
this.subs = this.subs[0:0]
|
||||
this.qos = this.qos[0:0]
|
||||
return nil
|
||||
}
|
||||
|
||||
// If we find the subscriber then remove it from the list. Technically
|
||||
// we just overwrite the slot by shifting all other items up by one.
|
||||
for i := range this.subs {
|
||||
if equal(this.subs[i], sub) {
|
||||
this.subs = append(this.subs[:i], this.subs[i+1:]...)
|
||||
this.qos = append(this.qos[:i], this.qos[i+1:]...)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("No topic found for subscriber")
|
||||
}
|
||||
|
||||
// Not the last level, so let's find the next level snode, and recursively
|
||||
// call it's remove().
|
||||
|
||||
// ntl = next topic level
|
||||
ntl, rem, err := nextTopicLevel(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
level := string(ntl)
|
||||
|
||||
// Find the snode that matches the topic level
|
||||
n, ok := this.snodes[level]
|
||||
if !ok {
|
||||
return fmt.Errorf("No topic found")
|
||||
}
|
||||
|
||||
// Remove the subscriber from the next level snode
|
||||
if err := n.sremove(rem, sub); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If there are no more subscribers and snodes to the next level we just visited
|
||||
// let's remove it
|
||||
if len(n.subs) == 0 && len(n.snodes) == 0 {
|
||||
delete(this.snodes, level)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// smatch() returns all the subscribers that are subscribed to the topic. Given a topic
|
||||
// with no wildcards (publish topic), it returns a list of subscribers that subscribes
|
||||
// to the topic. For each of the level names, it's a match
|
||||
// - if there are subscribers to '#', then all the subscribers are added to result set
|
||||
func (this *snode) smatch(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error {
|
||||
// If the topic is empty, it means we are at the final matching snode. If so,
|
||||
// let's find the subscribers that match the qos and append them to the list.
|
||||
if len(topic) == 0 {
|
||||
this.matchQos(qos, subs, qoss)
|
||||
if mwcn, _ := this.snodes[MWC]; mwcn != nil {
|
||||
mwcn.matchQos(qos, subs, qoss)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ntl = next topic level
|
||||
ntl, rem, err := nextTopicLevel(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
level := string(ntl)
|
||||
|
||||
for k, n := range this.snodes {
|
||||
// If the key is "#", then these subscribers are added to the result set
|
||||
if k == MWC {
|
||||
n.matchQos(qos, subs, qoss)
|
||||
} else if k == SWC || k == level {
|
||||
if err := n.smatch(rem, qos, subs, qoss); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// retained message nodes
|
||||
type rnode struct {
|
||||
// If this is the end of the topic string, then add retained messages here
|
||||
msg *packets.PublishPacket
|
||||
// Otherwise add the next topic level here
|
||||
rnodes map[string]*rnode
|
||||
}
|
||||
|
||||
func newRNode() *rnode {
|
||||
return &rnode{
|
||||
rnodes: make(map[string]*rnode),
|
||||
}
|
||||
}
|
||||
|
||||
func (this *rnode) rinsertOrUpdate(topic []byte, msg *packets.PublishPacket) error {
|
||||
// If there's no more topic levels, that means we are at the matching rnode.
|
||||
if len(topic) == 0 {
|
||||
// Reuse the message if possible
|
||||
this.msg = msg
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Not the last level, so let's find or create the next level snode, and
|
||||
// recursively call it's insert().
|
||||
|
||||
// ntl = next topic level
|
||||
ntl, rem, err := nextTopicLevel(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
level := string(ntl)
|
||||
|
||||
// Add snode if it doesn't already exist
|
||||
n, ok := this.rnodes[level]
|
||||
if !ok {
|
||||
n = newRNode()
|
||||
this.rnodes[level] = n
|
||||
}
|
||||
|
||||
return n.rinsertOrUpdate(rem, msg)
|
||||
}
|
||||
|
||||
// Remove the retained message for the supplied topic
|
||||
func (this *rnode) rremove(topic []byte) error {
|
||||
// If the topic is empty, it means we are at the final matching rnode. If so,
|
||||
// let's remove the buffer and message.
|
||||
if len(topic) == 0 {
|
||||
this.msg = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Not the last level, so let's find the next level rnode, and recursively
|
||||
// call it's remove().
|
||||
|
||||
// ntl = next topic level
|
||||
ntl, rem, err := nextTopicLevel(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
level := string(ntl)
|
||||
|
||||
// Find the rnode that matches the topic level
|
||||
n, ok := this.rnodes[level]
|
||||
if !ok {
|
||||
return fmt.Errorf("No topic found")
|
||||
}
|
||||
|
||||
// Remove the subscriber from the next level rnode
|
||||
if err := n.rremove(rem); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If there are no more rnodes to the next level we just visited let's remove it
|
||||
if len(n.rnodes) == 0 {
|
||||
delete(this.rnodes, level)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// rmatch() finds the retained messages for the topic and qos provided. It's somewhat
|
||||
// of a reverse match compare to match() since the supplied topic can contain
|
||||
// wildcards, whereas the retained message topic is a full (no wildcard) topic.
|
||||
func (this *rnode) rmatch(topic []byte, msgs *[]*packets.PublishPacket) error {
|
||||
// If the topic is empty, it means we are at the final matching rnode. If so,
|
||||
// add the retained msg to the list.
|
||||
if len(topic) == 0 {
|
||||
if this.msg != nil {
|
||||
*msgs = append(*msgs, this.msg)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ntl = next topic level
|
||||
ntl, rem, err := nextTopicLevel(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
level := string(ntl)
|
||||
|
||||
if level == MWC {
|
||||
// If '#', add all retained messages starting this node
|
||||
this.allRetained(msgs)
|
||||
} else if level == SWC {
|
||||
// If '+', check all nodes at this level. Next levels must be matched.
|
||||
for _, n := range this.rnodes {
|
||||
if err := n.rmatch(rem, msgs); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Otherwise, find the matching node, go to the next level
|
||||
if n, ok := this.rnodes[level]; ok {
|
||||
if err := n.rmatch(rem, msgs); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *rnode) allRetained(msgs *[]*packets.PublishPacket) {
|
||||
if this.msg != nil {
|
||||
*msgs = append(*msgs, this.msg)
|
||||
}
|
||||
|
||||
for _, n := range this.rnodes {
|
||||
n.allRetained(msgs)
|
||||
}
|
||||
}
|
||||
|
||||
// Scanner states for nextTopicLevel: the kind of character most recently
// consumed.
const (
	stateCHR byte = iota // regular character
	stateMWC             // multi-level wildcard '#'
	stateSWC             // single-level wildcard '+'
	stateSEP             // topic level separator '/'
	stateSYS             // system topic prefix '$'
)
|
||||
|
||||
// Returns topic level, remaining topic levels and any errors
|
||||
func nextTopicLevel(topic []byte) ([]byte, []byte, error) {
|
||||
s := stateCHR
|
||||
|
||||
for i, c := range topic {
|
||||
switch c {
|
||||
case '/':
|
||||
if s == stateMWC {
|
||||
return nil, nil, fmt.Errorf("Multi-level wildcard found in topic and it's not at the last level")
|
||||
}
|
||||
|
||||
if i == 0 {
|
||||
return []byte(SWC), topic[i+1:], nil
|
||||
}
|
||||
|
||||
return topic[:i], topic[i+1:], nil
|
||||
|
||||
case '#':
|
||||
if i != 0 {
|
||||
return nil, nil, fmt.Errorf("Wildcard character '#' must occupy entire topic level")
|
||||
}
|
||||
|
||||
s = stateMWC
|
||||
|
||||
case '+':
|
||||
if i != 0 {
|
||||
return nil, nil, fmt.Errorf("Wildcard character '+' must occupy entire topic level")
|
||||
}
|
||||
|
||||
s = stateSWC
|
||||
|
||||
// case '$':
|
||||
// if i == 0 {
|
||||
// return nil, nil, fmt.Errorf("Cannot publish to $ topics")
|
||||
// }
|
||||
|
||||
// s = stateSYS
|
||||
|
||||
default:
|
||||
if s == stateMWC || s == stateSWC {
|
||||
return nil, nil, fmt.Errorf("Wildcard characters '#' and '+' must occupy entire topic level")
|
||||
}
|
||||
|
||||
s = stateCHR
|
||||
}
|
||||
}
|
||||
|
||||
// If we got here that means we didn't hit the separator along the way, so the
|
||||
// topic is either empty, or does not contain a separator. Either way, we return
|
||||
// the full topic
|
||||
return topic, nil, nil
|
||||
}
|
||||
|
||||
// The QoS of the payload messages sent in response to a subscription must be the
|
||||
// minimum of the QoS of the originally published message (in this case, it's the
|
||||
// qos parameter) and the maximum QoS granted by the server (in this case, it's
|
||||
// the QoS in the topic tree).
|
||||
//
|
||||
// It's also possible that even if the topic matches, the subscriber is not included
|
||||
// due to the QoS granted is lower than the published message QoS. For example,
|
||||
// if the client is granted only QoS 0, and the publish message is QoS 1, then this
|
||||
// client is not to be send the published message.
|
||||
func (this *snode) matchQos(qos byte, subs *[]interface{}, qoss *[]byte) {
|
||||
for _, sub := range this.subs {
|
||||
// If the published QoS is higher than the subscriber QoS, then we skip the
|
||||
// subscriber. Otherwise, add to the list.
|
||||
// if qos >= this.qos[i] {
|
||||
*subs = append(*subs, sub)
|
||||
*qoss = append(*qoss, qos)
|
||||
// }
|
||||
}
|
||||
}
|
||||
|
||||
// equal reports whether two subscriber keys are the same. Keys of
// different dynamic types are never equal. Function values are compared
// by underlying code pointer, since Go functions cannot be compared with
// ==; every other comparable type falls back to interface equality.
//
// NOTE: passing an uncomparable non-func type (slice, map) panics, the
// same as a direct == comparison would.
func equal(k1, k2 interface{}) bool {
	if reflect.TypeOf(k1) != reflect.TypeOf(k2) {
		return false
	}

	if reflect.ValueOf(k1).Kind() == reflect.Func {
		// BUG FIX: the previous implementation returned &k1 == &k2,
		// which compares the addresses of the two local parameters and
		// is therefore always false. Compare code pointers instead.
		return reflect.ValueOf(k1).Pointer() == reflect.ValueOf(k2).Pointer()
	}

	// Same dynamic type and comparable: interface equality decides.
	// (The old per-type switch after this point was unreachable logic —
	// every case it handled is already answered by this comparison.)
	return k1 == k2
}
|
||||
91
broker/lib/topics/topics.go
Normal file
91
broker/lib/topics/topics.go
Normal file
@@ -0,0 +1,91 @@
|
||||
package topics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||
)
|
||||
|
||||
// Topic syntax characters defined by the MQTT specification.
const (
	// MWC is the multi-level wildcard.
	MWC = "#"

	// SWC is the single-level wildcard.
	SWC = "+"

	// SEP separates topic levels.
	SEP = "/"

	// SYS prefixes system ($-rooted) topics.
	SYS = "$"

	// _WC holds both wildcard characters, for one-shot scans.
	_WC = "#+"
)
|
||||
|
||||
var (
|
||||
providers = make(map[string]TopicsProvider)
|
||||
)
|
||||
|
||||
// TopicsProvider
|
||||
type TopicsProvider interface {
|
||||
Subscribe(topic []byte, qos byte, subscriber interface{}) (byte, error)
|
||||
Unsubscribe(topic []byte, subscriber interface{}) error
|
||||
Subscribers(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error
|
||||
Retain(msg *packets.PublishPacket) error
|
||||
Retained(topic []byte, msgs *[]*packets.PublishPacket) error
|
||||
Close() error
|
||||
}
|
||||
|
||||
func Register(name string, provider TopicsProvider) {
|
||||
if provider == nil {
|
||||
panic("topics: Register provide is nil")
|
||||
}
|
||||
|
||||
if _, dup := providers[name]; dup {
|
||||
panic("topics: Register called twice for provider " + name)
|
||||
}
|
||||
|
||||
providers[name] = provider
|
||||
}
|
||||
|
||||
func Unregister(name string) {
|
||||
delete(providers, name)
|
||||
}
|
||||
|
||||
type Manager struct {
|
||||
p TopicsProvider
|
||||
}
|
||||
|
||||
func NewManager(providerName string) (*Manager, error) {
|
||||
p, ok := providers[providerName]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("session: unknown provider %q", providerName)
|
||||
}
|
||||
|
||||
return &Manager{p: p}, nil
|
||||
}
|
||||
|
||||
func (this *Manager) Subscribe(topic []byte, qos byte, subscriber interface{}) (byte, error) {
|
||||
return this.p.Subscribe(topic, qos, subscriber)
|
||||
}
|
||||
|
||||
func (this *Manager) Unsubscribe(topic []byte, subscriber interface{}) error {
|
||||
return this.p.Unsubscribe(topic, subscriber)
|
||||
}
|
||||
|
||||
func (this *Manager) Subscribers(topic []byte, qos byte, subs *[]interface{}, qoss *[]byte) error {
|
||||
return this.p.Subscribers(topic, qos, subs, qoss)
|
||||
}
|
||||
|
||||
func (this *Manager) Retain(msg *packets.PublishPacket) error {
|
||||
return this.p.Retain(msg)
|
||||
}
|
||||
|
||||
func (this *Manager) Retained(topic []byte, msgs *[]*packets.PublishPacket) error {
|
||||
return this.p.Retained(topic, msgs)
|
||||
}
|
||||
|
||||
func (this *Manager) Close() error {
|
||||
return this.p.Close()
|
||||
}
|
||||
@@ -1,62 +0,0 @@
|
||||
package broker
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||
)
|
||||
|
||||
const (
|
||||
MaxUser = 1024 * 1024
|
||||
MessagePoolNum = 1024
|
||||
MessagePoolUser = MaxUser / MessagePoolNum
|
||||
MessagePoolMessageNum = MaxUser / MessagePoolNum * 4
|
||||
)
|
||||
|
||||
type Message struct {
|
||||
client *client
|
||||
packet packets.ControlPacket
|
||||
}
|
||||
|
||||
var (
|
||||
MSGPool []MessagePool
|
||||
)
|
||||
|
||||
type MessagePool struct {
|
||||
l sync.Mutex
|
||||
maxuser int
|
||||
user int
|
||||
queue chan *Message
|
||||
}
|
||||
|
||||
func InitMessagePool() {
|
||||
MSGPool = make([]MessagePool, (MessagePoolNum + 2))
|
||||
for i := 0; i < (MessagePoolNum + 2); i++ {
|
||||
MSGPool[i].Init(MessagePoolUser, MessagePoolMessageNum)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *MessagePool) Init(num int, maxusernum int) {
|
||||
p.maxuser = maxusernum
|
||||
p.queue = make(chan *Message, num)
|
||||
}
|
||||
|
||||
func (p *MessagePool) GetPool() *MessagePool {
|
||||
p.l.Lock()
|
||||
if p.user+1 < p.maxuser {
|
||||
p.user += 1
|
||||
p.l.Unlock()
|
||||
return p
|
||||
} else {
|
||||
p.l.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (p *MessagePool) Reduce() {
|
||||
p.l.Lock()
|
||||
p.user -= 1
|
||||
p.l.Unlock()
|
||||
|
||||
}
|
||||
122
broker/retain.go
122
broker/retain.go
@@ -1,122 +0,0 @@
|
||||
package broker
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/eclipse/paho.mqtt.golang/packets"
|
||||
)
|
||||
|
||||
type RetainList struct {
|
||||
sync.RWMutex
|
||||
root *rlevel
|
||||
}
|
||||
type rlevel struct {
|
||||
nodes map[string]*rnode
|
||||
}
|
||||
type rnode struct {
|
||||
next *rlevel
|
||||
msg *packets.PublishPacket
|
||||
}
|
||||
type RetainResult struct {
|
||||
msg []*packets.PublishPacket
|
||||
}
|
||||
|
||||
func newRNode() *rnode {
|
||||
return &rnode{}
|
||||
}
|
||||
|
||||
func newRLevel() *rlevel {
|
||||
return &rlevel{nodes: make(map[string]*rnode)}
|
||||
}
|
||||
|
||||
func NewRetainList() *RetainList {
|
||||
return &RetainList{root: newRLevel()}
|
||||
}
|
||||
|
||||
func (r *RetainList) Insert(topic string, buf *packets.PublishPacket) error {
|
||||
|
||||
tokens, err := PublishTopicCheckAndSpilt(topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// log.Info("insert tokens:", tokens)
|
||||
r.Lock()
|
||||
|
||||
l := r.root
|
||||
var n *rnode
|
||||
for _, t := range tokens {
|
||||
n = l.nodes[t]
|
||||
if n == nil {
|
||||
n = newRNode()
|
||||
l.nodes[t] = n
|
||||
}
|
||||
if n.next == nil {
|
||||
n.next = newRLevel()
|
||||
}
|
||||
l = n.next
|
||||
}
|
||||
n.msg = buf
|
||||
r.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RetainList) Match(topic string) []*packets.PublishPacket {
|
||||
|
||||
tokens, err := SubscribeTopicCheckAndSpilt(topic)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
results := &RetainResult{}
|
||||
|
||||
r.Lock()
|
||||
l := r.root
|
||||
matchRLevel(l, tokens, results)
|
||||
r.Unlock()
|
||||
// log.Info("results: ", results)
|
||||
return results.msg
|
||||
|
||||
}
|
||||
func matchRLevel(l *rlevel, toks []string, results *RetainResult) {
|
||||
var n *rnode
|
||||
for i, t := range toks {
|
||||
if l == nil {
|
||||
return
|
||||
}
|
||||
// log.Info("l info :", l.nodes)
|
||||
if t == "#" {
|
||||
for _, n := range l.nodes {
|
||||
n.GetAll(results)
|
||||
}
|
||||
}
|
||||
if t == "+" {
|
||||
for _, n := range l.nodes {
|
||||
if len(t[i+1:]) == 0 {
|
||||
results.msg = append(results.msg, n.msg)
|
||||
} else {
|
||||
matchRLevel(n.next, toks[i+1:], results)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
n = l.nodes[t]
|
||||
if n != nil {
|
||||
l = n.next
|
||||
} else {
|
||||
l = nil
|
||||
}
|
||||
}
|
||||
if n != nil {
|
||||
results.msg = append(results.msg, n.msg)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *rnode) GetAll(results *RetainResult) {
|
||||
// log.Info("node 's message: ", string(r.msg))
|
||||
if r.msg != nil {
|
||||
results.msg = append(results.msg, r.msg)
|
||||
}
|
||||
l := r.next
|
||||
for _, n := range l.nodes {
|
||||
n.GetAll(results)
|
||||
}
|
||||
}
|
||||
53
broker/session.go
Normal file
53
broker/session.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package broker
|
||||
|
||||
import "github.com/eclipse/paho.mqtt.golang/packets"
|
||||
|
||||
func (b *Broker) getSession(cli *client, req *packets.ConnectPacket, resp *packets.ConnackPacket) error {
|
||||
// If CleanSession is set to 0, the server MUST resume communications with the
|
||||
// client based on state from the current session, as identified by the client
|
||||
// identifier. If there is no session associated with the client identifier the
|
||||
// server must create a new session.
|
||||
//
|
||||
// If CleanSession is set to 1, the client and server must discard any previous
|
||||
// session and start a new one. b session lasts as long as the network c
|
||||
// onnection. State data associated with b session must not be reused in any
|
||||
// subsequent session.
|
||||
|
||||
var err error
|
||||
|
||||
// Check to see if the client supplied an ID, if not, generate one and set
|
||||
// clean session.
|
||||
|
||||
if len(req.ClientIdentifier) == 0 {
|
||||
req.CleanSession = true
|
||||
}
|
||||
|
||||
cid := req.ClientIdentifier
|
||||
|
||||
// If CleanSession is NOT set, check the session store for existing session.
|
||||
// If found, return it.
|
||||
if !req.CleanSession {
|
||||
if cli.session, err = b.sessionMgr.Get(cid); err == nil {
|
||||
resp.SessionPresent = true
|
||||
|
||||
if err := cli.session.Update(req); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If CleanSession, or no existing session found, then create a new one
|
||||
if cli.session == nil {
|
||||
if cli.session, err = b.sessionMgr.New(cid); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp.SessionPresent = false
|
||||
|
||||
if err := cli.session.Init(req); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,316 +0,0 @@
|
||||
package broker
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
log "github.com/cihub/seelog"
|
||||
)
|
||||
|
||||
// A result structure better optimized for queue subs.
|
||||
type SublistResult struct {
|
||||
psubs []*subscription
|
||||
qsubs []*subscription // don't make this a map, too expensive to iterate
|
||||
}
|
||||
|
||||
// A Sublist stores and efficiently retrieves subscriptions.
|
||||
type Sublist struct {
|
||||
sync.RWMutex
|
||||
cache map[string]*SublistResult
|
||||
root *level
|
||||
}
|
||||
|
||||
// A node contains subscriptions and a pointer to the next level.
|
||||
type node struct {
|
||||
next *level
|
||||
psubs []*subscription
|
||||
qsubs []*subscription
|
||||
}
|
||||
|
||||
// A level represents a group of nodes and special pointers to
|
||||
// wildcard nodes.
|
||||
type level struct {
|
||||
nodes map[string]*node
|
||||
}
|
||||
|
||||
// Create a new default node.
|
||||
func newNode() *node {
|
||||
return &node{psubs: make([]*subscription, 0, 4), qsubs: make([]*subscription, 0, 4)}
|
||||
}
|
||||
|
||||
// Create a new default level. We use FNV1A as the hash
|
||||
// algortihm for the tokens, which should be short.
|
||||
func newLevel() *level {
|
||||
return &level{nodes: make(map[string]*node)}
|
||||
}
|
||||
|
||||
// New will create a default sublist
|
||||
func NewSublist() *Sublist {
|
||||
return &Sublist{root: newLevel(), cache: make(map[string]*SublistResult)}
|
||||
}
|
||||
|
||||
// Insert adds a subscription into the sublist
|
||||
func (s *Sublist) Insert(sub *subscription) error {
|
||||
|
||||
tokens, err := SubscribeTopicCheckAndSpilt(sub.topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.Lock()
|
||||
|
||||
l := s.root
|
||||
var n *node
|
||||
for _, t := range tokens {
|
||||
n = l.nodes[t]
|
||||
if n == nil {
|
||||
n = newNode()
|
||||
l.nodes[t] = n
|
||||
}
|
||||
if n.next == nil {
|
||||
n.next = newLevel()
|
||||
}
|
||||
l = n.next
|
||||
}
|
||||
if sub.queue {
|
||||
//check qsub is already exist
|
||||
for i := range n.qsubs {
|
||||
if equal(n.qsubs[i], sub) {
|
||||
n.qsubs[i] = sub
|
||||
return nil
|
||||
}
|
||||
}
|
||||
n.qsubs = append(n.qsubs, sub)
|
||||
} else {
|
||||
//check psub is already exist
|
||||
for i := range n.psubs {
|
||||
if equal(n.psubs[i], sub) {
|
||||
n.psubs[i] = sub
|
||||
return nil
|
||||
}
|
||||
}
|
||||
n.psubs = append(n.psubs, sub)
|
||||
}
|
||||
|
||||
topic := string(sub.topic)
|
||||
s.addToCache(topic, sub)
|
||||
s.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Sublist) addToCache(topic string, sub *subscription) {
|
||||
for k, r := range s.cache {
|
||||
if matchLiteral(k, topic) {
|
||||
// Copy since others may have a reference.
|
||||
nr := copyResult(r)
|
||||
if sub.queue == false {
|
||||
nr.psubs = append(nr.psubs, sub)
|
||||
} else {
|
||||
nr.qsubs = append(nr.qsubs, sub)
|
||||
}
|
||||
s.cache[k] = nr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Sublist) removeFromCache(topic string, sub *subscription) {
|
||||
for k := range s.cache {
|
||||
if !matchLiteral(k, topic) {
|
||||
continue
|
||||
}
|
||||
// Since someone else may be referecing, can't modify the list
|
||||
// safely, just let it re-populate.
|
||||
delete(s.cache, k)
|
||||
}
|
||||
}
|
||||
|
||||
func matchLiteral(literal, topic string) bool {
|
||||
tok, _ := SubscribeTopicCheckAndSpilt(topic)
|
||||
li, _ := PublishTopicCheckAndSpilt(literal)
|
||||
|
||||
for i := 0; i < len(tok); i++ {
|
||||
b := tok[i]
|
||||
switch b {
|
||||
case "+":
|
||||
|
||||
case "#":
|
||||
return true
|
||||
default:
|
||||
if b != li[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Deep copy
|
||||
func copyResult(r *SublistResult) *SublistResult {
|
||||
nr := &SublistResult{}
|
||||
nr.psubs = append([]*subscription(nil), r.psubs...)
|
||||
nr.qsubs = append([]*subscription(nil), r.qsubs...)
|
||||
return nr
|
||||
}
|
||||
|
||||
func (s *Sublist) Remove(sub *subscription) error {
|
||||
tokens, err := SubscribeTopicCheckAndSpilt(sub.topic)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
l := s.root
|
||||
var n *node
|
||||
|
||||
for _, t := range tokens {
|
||||
if l == nil {
|
||||
return errors.New("No Matches subscription Found")
|
||||
}
|
||||
n = l.nodes[t]
|
||||
if n != nil {
|
||||
l = n.next
|
||||
} else {
|
||||
l = nil
|
||||
}
|
||||
}
|
||||
if !s.removeFromNode(n, sub) {
|
||||
return errors.New("No Matches subscription Found")
|
||||
}
|
||||
topic := string(sub.topic)
|
||||
s.removeFromCache(topic, sub)
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (s *Sublist) removeFromNode(n *node, sub *subscription) (found bool) {
|
||||
if n == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if sub.queue {
|
||||
n.qsubs, found = removeSubFromList(sub, n.qsubs)
|
||||
return found
|
||||
} else {
|
||||
n.psubs, found = removeSubFromList(sub, n.psubs)
|
||||
return found
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *Sublist) Match(topic string) *SublistResult {
|
||||
s.RLock()
|
||||
rc, ok := s.cache[topic]
|
||||
s.RUnlock()
|
||||
|
||||
if ok {
|
||||
return rc
|
||||
}
|
||||
|
||||
tokens, err := PublishTopicCheckAndSpilt(topic)
|
||||
if err != nil {
|
||||
log.Error("\tserver/sublist.go: ", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
result := &SublistResult{}
|
||||
|
||||
s.Lock()
|
||||
l := s.root
|
||||
if len(tokens) > 0 {
|
||||
if tokens[0] == "/" {
|
||||
if _, exist := l.nodes["#"]; exist {
|
||||
addNodeToResults(l.nodes["#"], result)
|
||||
}
|
||||
if _, exist := l.nodes["+"]; exist {
|
||||
matchLevel(l.nodes["/"].next, tokens[1:], result)
|
||||
}
|
||||
if _, exist := l.nodes["/"]; exist {
|
||||
matchLevel(l.nodes["/"].next, tokens[1:], result)
|
||||
}
|
||||
} else {
|
||||
matchLevel(s.root, tokens, result)
|
||||
}
|
||||
}
|
||||
s.cache[topic] = result
|
||||
if len(s.cache) > 1024 {
|
||||
for k := range s.cache {
|
||||
delete(s.cache, k)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
s.Unlock()
|
||||
return result
|
||||
}
|
||||
|
||||
func matchLevel(l *level, toks []string, results *SublistResult) {
|
||||
var swc, n *node
|
||||
exist := false
|
||||
for i, t := range toks {
|
||||
if l == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if _, exist = l.nodes["#"]; exist {
|
||||
addNodeToResults(l.nodes["#"], results)
|
||||
}
|
||||
if t != "/" {
|
||||
if swc, exist = l.nodes["+"]; exist {
|
||||
matchLevel(l.nodes["+"].next, toks[i+1:], results)
|
||||
}
|
||||
} else {
|
||||
if _, exist = l.nodes["+"]; exist {
|
||||
addNodeToResults(l.nodes["+"], results)
|
||||
}
|
||||
}
|
||||
|
||||
n = l.nodes[t]
|
||||
if n != nil {
|
||||
l = n.next
|
||||
} else {
|
||||
l = nil
|
||||
}
|
||||
}
|
||||
if n != nil {
|
||||
addNodeToResults(n, results)
|
||||
}
|
||||
if swc != nil {
|
||||
addNodeToResults(n, results)
|
||||
}
|
||||
}
|
||||
|
||||
// This will add in a node's results to the total results.
|
||||
func addNodeToResults(n *node, results *SublistResult) {
|
||||
results.psubs = append(results.psubs, n.psubs...)
|
||||
results.qsubs = append(results.qsubs, n.qsubs...)
|
||||
}
|
||||
|
||||
func removeSubFromList(sub *subscription, sl []*subscription) ([]*subscription, bool) {
|
||||
for i := 0; i < len(sl); i++ {
|
||||
if sl[i] == sub {
|
||||
last := len(sl) - 1
|
||||
sl[i] = sl[last]
|
||||
sl[last] = nil
|
||||
sl = sl[:last]
|
||||
return shrinkAsNeeded(sl), true
|
||||
}
|
||||
}
|
||||
return sl, false
|
||||
}
|
||||
|
||||
// Checks if we need to do a resize. This is for very large growth then
|
||||
// subsequent return to a more normal size from unsubscribe.
|
||||
func shrinkAsNeeded(sl []*subscription) []*subscription {
|
||||
lsl := len(sl)
|
||||
csl := cap(sl)
|
||||
// Don't bother if list not too big
|
||||
if csl <= 8 {
|
||||
return sl
|
||||
}
|
||||
pFree := float32(csl-lsl) / float32(csl)
|
||||
if pFree > 0.50 {
|
||||
return append([]*subscription(nil), sl...)
|
||||
}
|
||||
return sl
|
||||
}
|
||||
24
broker/usage.go
Normal file
24
broker/usage.go
Normal file
@@ -0,0 +1,24 @@
|
||||
package broker
|
||||
|
||||
var usageStr = `
|
||||
Usage: hmq [options]
|
||||
|
||||
Broker Options:
|
||||
-w, --worker <number> Worker num to process message, perfer (client num)/10. (default 1024)
|
||||
-p, --port <port> Use port for clients (default: 1883)
|
||||
--host <host> Network host to listen on. (default "0.0.0.0")
|
||||
-ws, --wsport <port> Use port for websocket monitoring
|
||||
-wsp,--wspath <path> Use path for websocket monitoring
|
||||
-c, --config <file> Configuration file
|
||||
|
||||
Logging Options:
|
||||
-d, --debug <bool> Enable debugging output (default false)
|
||||
-D Debug and trace
|
||||
|
||||
Cluster Options:
|
||||
-r, --router <rurl> Router who maintenance cluster info
|
||||
-cp, --clusterport <cluster-port> Cluster listen port for others
|
||||
|
||||
Common Options:
|
||||
-h, --help Show this message
|
||||
`
|
||||
@@ -1,37 +0,0 @@
|
||||
package broker
|
||||
|
||||
type Worker struct {
|
||||
WorkerPool chan chan *Message
|
||||
MsgChannel chan *Message
|
||||
quit chan bool
|
||||
}
|
||||
|
||||
func NewWorker(workerPool chan chan *Message) Worker {
|
||||
return Worker{
|
||||
WorkerPool: workerPool,
|
||||
MsgChannel: make(chan *Message),
|
||||
quit: make(chan bool)}
|
||||
}
|
||||
|
||||
func (w Worker) Start() {
|
||||
go func() {
|
||||
for {
|
||||
// register the current worker into the worker queue.
|
||||
w.WorkerPool <- w.MsgChannel
|
||||
select {
|
||||
case msg := <-w.MsgChannel:
|
||||
// we have received a work request.
|
||||
ProcessMessage(msg)
|
||||
case <-w.quit:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Stop signals the worker to stop listening for work requests.
|
||||
func (w Worker) Stop() {
|
||||
go func() {
|
||||
w.quit <- true
|
||||
}()
|
||||
}
|
||||
@@ -1,16 +1,18 @@
|
||||
{
|
||||
"workerNum": 4096,
|
||||
"port": "1883",
|
||||
"v5Port": "1885",
|
||||
"host": "0.0.0.0",
|
||||
"cluster": {
|
||||
"host": "0.0.0.0",
|
||||
"port": "1993",
|
||||
"routes": []
|
||||
"port": "1993"
|
||||
},
|
||||
"httpPort": "8080",
|
||||
"tlsPort": "8883",
|
||||
"tlsHost": "0.0.0.0",
|
||||
"wsPort": "1888",
|
||||
"wsPath": "/ws",
|
||||
"ws5Path": "/ws5",
|
||||
"wsTLS": false,
|
||||
"tlsInfo": {
|
||||
"verify": true,
|
||||
@@ -18,6 +20,8 @@
|
||||
"certFile": "ssl/server/cert.pem",
|
||||
"keyFile": "ssl/server/key.pem"
|
||||
},
|
||||
"acl": false,
|
||||
"aclConf": "conf/acl.conf"
|
||||
"plugins": {
|
||||
"auth": "mock",
|
||||
"bridge": "csvlog"
|
||||
}
|
||||
}
|
||||
|
||||
37
deploy/config.yaml
Normal file
37
deploy/config.yaml
Normal file
@@ -0,0 +1,37 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: mqtt-broker
|
||||
data:
|
||||
hmq.config: |
|
||||
{
|
||||
"workerNum": 4096,
|
||||
"port": "1883",
|
||||
"host": "0.0.0.0",
|
||||
"plugins": ["authhttp","kafka"]
|
||||
}
|
||||
|
||||
kafka.json: |
|
||||
{
|
||||
"addr": [
|
||||
"127.0.0.1:9090"
|
||||
],
|
||||
"onConnect": "onConnect",
|
||||
"onPublish": "onPublish",
|
||||
"onSubscribe": "onSubscribe",
|
||||
"onDisconnect": "onDisconnect",
|
||||
"onUnsubscribe": "onUnsubscribe",
|
||||
"deliverMap": {
|
||||
"#": "publish",
|
||||
"/upload/+/#": "upload"
|
||||
}
|
||||
}
|
||||
|
||||
authhttp.json: |
|
||||
{
|
||||
"auth": "http://127.0.0.1:9090/mqtt/auth",
|
||||
"acl": "http://127.0.0.1:9090/mqtt/acl",
|
||||
"super": "http://127.0.0.1:9090/mqtt/superuser"
|
||||
}
|
||||
|
||||
|
||||
44
deploy/deploy.yaml
Normal file
44
deploy/deploy.yaml
Normal file
@@ -0,0 +1,44 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: mqtt-broker
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: mqtt-broker
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: mqtt-broker
|
||||
spec:
|
||||
containers:
|
||||
- name: mqtt-broker
|
||||
image: hmq:v0.1.0
|
||||
ports:
|
||||
- containerPort: 1883
|
||||
- containerPort: 8080
|
||||
volumeMounts:
|
||||
- name: mqtt-broker
|
||||
mountPath: /conf
|
||||
subPath: hmq.config
|
||||
- name: mqtt-broker
|
||||
mountPath: /plugins/kafka/kafka.json
|
||||
subPath: kafka.json
|
||||
- name: mqtt-broker
|
||||
mountPath: /plugins/authttp/http.json
|
||||
subPath: http.json
|
||||
volumes:
|
||||
- name: mqtt-broker
|
||||
configMap:
|
||||
name: mqtt-broker
|
||||
items:
  - key: hmq.config
    path: hmq.config
  - key: authhttp.json
    path: http.json
  - key: kafka.json
    path: kafka.json
|
||||
|
||||
13
deploy/svc.yaml
Normal file
13
deploy/svc.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: mqtt-broker
|
||||
spec:
|
||||
selector:
|
||||
app: mqtt-broker
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 1883
|
||||
targetPort: 1883
|
||||
type: ClusterIP
|
||||
sessionAffinity: ClientIP
|
||||
64
go.mod
Normal file
64
go.mod
Normal file
@@ -0,0 +1,64 @@
|
||||
module github.com/fhmq/hmq
|
||||
|
||||
go 1.21
|
||||
|
||||
require (
|
||||
github.com/Shopify/sarama v1.38.1
|
||||
github.com/bitly/go-simplejson v0.5.0
|
||||
github.com/cespare/xxhash/v2 v2.1.2
|
||||
github.com/eapache/queue v1.1.0
|
||||
github.com/eclipse/paho.golang v0.12.0
|
||||
github.com/eclipse/paho.mqtt.golang v1.4.3
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/json-iterator/go v1.1.12
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/stretchr/testify v1.8.4
|
||||
go.uber.org/zap v1.24.0
|
||||
golang.org/x/net v0.17.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
|
||||
github.com/bytedance/sonic v1.9.1 // indirect
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/eapache/go-resiliency v1.3.0 // indirect
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.14.0 // indirect
|
||||
github.com/goccy/go-json v0.10.2 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/hashicorp/errwrap v1.0.0 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/go-uuid v1.0.3 // indirect
|
||||
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
|
||||
github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
|
||||
github.com/jcmturner/gofork v1.7.6 // indirect
|
||||
github.com/jcmturner/gokrb5/v8 v8.4.3 // indirect
|
||||
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
|
||||
github.com/klauspost/compress v1.15.14 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
|
||||
github.com/leodido/go-urn v1.2.4 // indirect
|
||||
github.com/mattn/go-isatty v0.0.19 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.17 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
|
||||
github.com/rogpeppe/go-internal v1.12.0 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/ugorji/go/codec v1.2.11 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
golang.org/x/arch v0.3.0 // indirect
|
||||
golang.org/x/crypto v0.14.0 // indirect
|
||||
golang.org/x/sys v0.13.0 // indirect
|
||||
golang.org/x/text v0.13.0 // indirect
|
||||
google.golang.org/protobuf v1.30.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
181
go.sum
Normal file
181
go.sum
Normal file
@@ -0,0 +1,181 @@
|
||||
github.com/Shopify/sarama v1.38.1 h1:lqqPUPQZ7zPqYlWpTh+LQ9bhYNu2xJL6k1SJN4WVe2A=
|
||||
github.com/Shopify/sarama v1.38.1/go.mod h1:iwv9a67Ha8VNa+TifujYoWGxWnu2kNVAQdSdZ4X2o5g=
|
||||
github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc=
|
||||
github.com/Shopify/toxiproxy/v2 v2.5.0/go.mod h1:yhM2epWtAmel9CB8r2+L+PCmhH6yH2pITaPAo7jxJl0=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=
|
||||
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
|
||||
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
|
||||
github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s=
|
||||
github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/eapache/go-resiliency v1.3.0 h1:RRL0nge+cWGlxXbUzJ7yMcq6w2XBEr19dCN6HECGaT0=
|
||||
github.com/eapache/go-resiliency v1.3.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 h1:8yY/I9ndfrgrXUbOGObLHKBR4Fl3nZXwM2c7OYTT8hM=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0=
|
||||
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/eclipse/paho.golang v0.12.0 h1:EXQFJbJklDnUqW6lyAknMWRhM2NgpHxwrrL8riUmp3Q=
|
||||
github.com/eclipse/paho.golang v0.12.0/go.mod h1:TSDCUivu9JnoR9Hl+H7sQMcHkejWH2/xKK1NJGtLbIE=
|
||||
github.com/eclipse/paho.mqtt.golang v1.4.3 h1:2kwcUGn8seMUfWndX0hGbvH8r7crgcJguQNCyp70xik=
|
||||
github.com/eclipse/paho.mqtt.golang v1.4.3/go.mod h1:CSYvoAlsMkhYOXh/oKyxa8EcBci6dVkLCbo5tTC1RIE=
|
||||
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
|
||||
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
|
||||
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
|
||||
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
|
||||
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
|
||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
||||
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
||||
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
||||
github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js=
|
||||
github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
|
||||
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
|
||||
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
|
||||
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
|
||||
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
|
||||
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
|
||||
github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
|
||||
github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
|
||||
github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg=
|
||||
github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo=
|
||||
github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=
|
||||
github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
|
||||
github.com/jcmturner/gokrb5/v8 v8.4.3 h1:iTonLeSJOn7MVUtyMT+arAn5AKAPrkilzhGw8wE/Tq8=
|
||||
github.com/jcmturner/gokrb5/v8 v8.4.3/go.mod h1:dqRwJGXznQrzw6cWmyo6kH+E7jksEQG/CyVWsJEsJO0=
|
||||
github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
|
||||
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/klauspost/compress v1.15.14 h1:i7WCKDToww0wA+9qrUZ1xOjp218vfFo3nTU6UHp+gOc=
|
||||
github.com/klauspost/compress v1.15.14/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
|
||||
github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
|
||||
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
|
||||
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
|
||||
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
|
||||
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
|
||||
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
|
||||
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
|
||||
github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
|
||||
github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
|
||||
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
|
||||
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
|
||||
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
|
||||
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
|
||||
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
|
||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
|
||||
golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
|
||||
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM=
|
||||
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
|
||||
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
|
||||
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
|
||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
|
||||
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
||||
64
logger/logger.go
Normal file
64
logger/logger.go
Normal file
@@ -0,0 +1,64 @@
|
||||
/* Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
|
||||
*/
|
||||
|
||||
package logger
|
||||
|
||||
import (
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
var (
|
||||
// env can be setup at build time with Go Linker. Value could be prod or whatever else for dev env
|
||||
instance *zap.Logger
|
||||
logCfg zap.Config
|
||||
encoderCfg = zap.NewProductionEncoderConfig()
|
||||
)
|
||||
|
||||
func init() {
|
||||
encoderCfg.TimeKey = "timestamp"
|
||||
encoderCfg.EncodeTime = zapcore.ISO8601TimeEncoder
|
||||
}
|
||||
|
||||
// NewDevLogger return a logger for dev builds
|
||||
func NewDevLogger() (*zap.Logger, error) {
|
||||
logCfg := zap.NewProductionConfig()
|
||||
logCfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
|
||||
// logCfg.DisableStacktrace = true
|
||||
logCfg.EncoderConfig = encoderCfg
|
||||
return logCfg.Build()
|
||||
}
|
||||
|
||||
// NewProdLogger return a logger for production builds
|
||||
func NewProdLogger() (*zap.Logger, error) {
|
||||
logCfg := zap.NewProductionConfig()
|
||||
logCfg.DisableStacktrace = true
|
||||
logCfg.Level = zap.NewAtomicLevelAt(zap.InfoLevel)
|
||||
logCfg.EncoderConfig = encoderCfg
|
||||
return logCfg.Build()
|
||||
}
|
||||
|
||||
func Prod() *zap.Logger {
|
||||
|
||||
l, _ := NewProdLogger()
|
||||
instance = l
|
||||
|
||||
return instance
|
||||
}
|
||||
|
||||
func Debug() *zap.Logger {
|
||||
|
||||
l, _ := NewDevLogger()
|
||||
instance = l
|
||||
|
||||
return instance
|
||||
}
|
||||
|
||||
func Get() *zap.Logger {
|
||||
if instance == nil {
|
||||
l, _ := NewProdLogger()
|
||||
instance = l
|
||||
}
|
||||
|
||||
return instance
|
||||
}
|
||||
33
logger/logger_test.go
Normal file
33
logger/logger_test.go
Normal file
@@ -0,0 +1,33 @@
|
||||
/*
|
||||
Copyright (c) 2018, joy.zhou <chowyu08@gmail.com>
|
||||
*/
|
||||
package logger
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
var l *zap.Logger
|
||||
logger := Get()
|
||||
|
||||
assert.NotNil(t, logger)
|
||||
assert.IsType(t, l, logger)
|
||||
}
|
||||
|
||||
func TestNewDevLogger(t *testing.T) {
|
||||
logger, err := NewDevLogger()
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, logger.Core().Enabled(zap.DebugLevel))
|
||||
}
|
||||
|
||||
func TestNewProdLogger(t *testing.T) {
|
||||
logger, err := NewProdLogger()
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.False(t, logger.Core().Enabled(zap.DebugLevel))
|
||||
}
|
||||
37
main.go
37
main.go
@@ -1,49 +1,30 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"hmq/broker"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
|
||||
log "github.com/cihub/seelog"
|
||||
"github.com/fhmq/hmq/broker"
|
||||
"github.com/fhmq/hmq/logger"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func init() {
|
||||
testConfig := `
|
||||
<seelog type="sync">
|
||||
<outputs formatid="main">
|
||||
<console/>
|
||||
</outputs>
|
||||
<formats>
|
||||
<format id="main" format="Time:%Date %Time%tfile:%File%tlevel:%LEVEL%t%Msg%n"/>
|
||||
</formats>
|
||||
</seelog>`
|
||||
|
||||
logger, err := log.LoggerFromConfigAsBytes([]byte(testConfig))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
log.ReplaceLogger(logger)
|
||||
}
|
||||
var log = logger.Get()
|
||||
|
||||
func main() {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
config, er := broker.LoadConfig()
|
||||
if er != nil {
|
||||
log.Error("Load Config file error: ", er)
|
||||
return
|
||||
config, err := broker.ConfigureConfig(os.Args[1:])
|
||||
if err != nil {
|
||||
log.Fatal("configure broker config error", zap.Error(err))
|
||||
}
|
||||
|
||||
b, err := broker.NewBroker(config)
|
||||
if err != nil {
|
||||
log.Error("New Broker error: ", er)
|
||||
return
|
||||
log.Fatal("New Broker error: ", zap.Error(err))
|
||||
}
|
||||
b.Start()
|
||||
|
||||
s := waitForSignal()
|
||||
log.Infof("signal got: %v ,broker closed.", s)
|
||||
log.Info("signal received, broker closed.", zap.Any("signal", s))
|
||||
}
|
||||
|
||||
func waitForSignal() os.Signal {
|
||||
|
||||
27
plugins/auth/auth.go
Normal file
27
plugins/auth/auth.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package auth
|
||||
|
||||
import (
|
||||
authfile "github.com/fhmq/hmq/plugins/auth/authfile"
|
||||
"github.com/fhmq/hmq/plugins/auth/authhttp"
|
||||
)
|
||||
|
||||
const (
|
||||
AuthHTTP = "authhttp"
|
||||
AuthFile = "authfile"
|
||||
)
|
||||
|
||||
type Auth interface {
|
||||
CheckACL(action, clientID, username, ip, topic string) bool
|
||||
CheckConnect(clientID, username, password string) bool
|
||||
}
|
||||
|
||||
func NewAuth(name string) Auth {
|
||||
switch name {
|
||||
case AuthHTTP:
|
||||
return authhttp.Init()
|
||||
case AuthFile:
|
||||
return authfile.Init()
|
||||
default:
|
||||
return &mockAuth{}
|
||||
}
|
||||
}
|
||||
54
plugins/auth/authfile/Readme.md
Normal file
54
plugins/auth/authfile/Readme.md
Normal file
@@ -0,0 +1,54 @@
|
||||
## ACL Configure
|
||||
```
|
||||
Attention: Acl Type Change, change `pub =1, sub=2` to `sub =1, pub=2`
|
||||
```
|
||||
#### The ACL rules define:
|
||||
~~~
|
||||
Allow | type | value | pubsub | Topics
|
||||
~~~
|
||||
#### ACL Config
|
||||
~~~
|
||||
## type clientid , username, ipaddr
|
||||
##sub 1 , pub 2, pubsub 3
|
||||
## %c is clientid , %u is username
|
||||
allow ip 127.0.0.1 2 $SYS/#
|
||||
allow clientid 0001 3 #
|
||||
allow username admin 3 #
|
||||
allow username joy 3 /test,hello/world
|
||||
allow clientid * 1 toCloud/%c
|
||||
allow username * 1 toCloud/%u
|
||||
deny clientid * 3 #
|
||||
~~~
|
||||
|
||||
~~~
|
||||
#allow local sub $SYS topic
|
||||
allow ip 127.0.0.1 1 $SYS/#
|
||||
~~~
|
||||
~~~
|
||||
#allow client who's id with 0001 or username with admin pub sub all topic
|
||||
allow clientid 0001 3 #
|
||||
allow username admin 3 #
|
||||
~~~
|
||||
~~~
|
||||
#allow client with the username joy can pub sub topic '/test' and 'hello/world'
|
||||
allow username joy 3 /test,hello/world
|
||||
~~~
|
||||
~~~
|
||||
#allow all client pub the topic toCloud/{clientid/username}
|
||||
allow clientid * 2 toCloud/%c
|
||||
allow username * 2 toCloud/%u
|
||||
~~~
|
||||
~~~
|
||||
#deny all client pub sub all topic
|
||||
deny clientid * 3 #
|
||||
~~~
|
||||
Client match acl rule one by one
|
||||
~~~
|
||||
--------- --------- ---------
|
||||
Client -> | Rule1 | --nomatch--> | Rule2 | --nomatch--> | Rule3 | -->
|
||||
--------- --------- ---------
|
||||
| | |
|
||||
match match match
|
||||
\|/ \|/ \|/
|
||||
allow | deny allow | deny allow | deny
|
||||
~~~
|
||||
@@ -1,4 +1,4 @@
|
||||
## pub 1 , sub 2, pubsub 3
|
||||
## sub 1 , pub 2, pubsub 3
|
||||
## %c is clientid , %s is username
|
||||
##auth type value pub/sub topic
|
||||
allow ip 127.0.0.1 2 $SYS/#
|
||||
@@ -9,4 +9,4 @@ allow clientid * 1 toCloud/%c
|
||||
allow username * 1 toCloud/%u
|
||||
allow clientid * 2 toDevice/%c
|
||||
allow username * 2 toDevice/%u
|
||||
deny clientid * 3 #
|
||||
deny clientid * 3 #
|
||||
23
plugins/auth/authfile/acl.go
Normal file
23
plugins/auth/authfile/acl.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package acl
|
||||
|
||||
type aclAuth struct {
|
||||
config *ACLConfig
|
||||
}
|
||||
|
||||
func Init() *aclAuth {
|
||||
aclConfig, err := AclConfigLoad("./plugins/auth/authfile/acl.conf")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &aclAuth{
|
||||
config: aclConfig,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *aclAuth) CheckConnect(clientID, username, password string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (a *aclAuth) CheckACL(action, clientID, username, ip, topic string) bool {
|
||||
return checkTopicAuth(a.config, action, ip, username, clientID, topic)
|
||||
}
|
||||
23
plugins/auth/authfile/acl_test.go
Normal file
23
plugins/auth/authfile/acl_test.go
Normal file
@@ -0,0 +1,23 @@
|
||||
//+build test
|
||||
|
||||
package acl
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestOrigAcls(t *testing.T) {
|
||||
pwd, _ := os.Getwd()
|
||||
os.Chdir("../../../")
|
||||
aclOrig := Init()
|
||||
os.Chdir(pwd)
|
||||
|
||||
// rule: allow ip 127.0.0.1 2 $SYS/#
|
||||
origAllowed := aclOrig.CheckACL(PUB, "dummyClientID", "dummyUser", "127.0.0.1", "$SYS/something")
|
||||
assert.True(t, origAllowed)
|
||||
origAllowed = aclOrig.CheckACL(SUB, "dummyClientID", "dummyUser", "127.0.0.1", "$SYS/something")
|
||||
assert.False(t, origAllowed)
|
||||
}
|
||||
@@ -2,20 +2,20 @@ package acl
|
||||
|
||||
import "strings"
|
||||
|
||||
func CheckTopicAuth(ACLInfo *ACLConfig, typ int, ip, username, clientid, topic string) bool {
|
||||
func checkTopicAuth(ACLInfo *ACLConfig, action, ip, username, clientid, topic string) bool {
|
||||
for _, info := range ACLInfo.Info {
|
||||
ctyp := info.Typ
|
||||
switch ctyp {
|
||||
case CLIENTID:
|
||||
if match, auth := info.checkWithClientID(typ, clientid, topic); match {
|
||||
if match, auth := info.checkWithClientID(action, clientid, topic); match {
|
||||
return auth
|
||||
}
|
||||
case USERNAME:
|
||||
if match, auth := info.checkWithUsername(typ, username, topic); match {
|
||||
if match, auth := info.checkWithUsername(action, username, topic); match {
|
||||
return auth
|
||||
}
|
||||
case IP:
|
||||
if match, auth := info.checkWithIP(typ, ip, topic); match {
|
||||
if match, auth := info.checkWithIP(action, ip, topic); match {
|
||||
return auth
|
||||
}
|
||||
}
|
||||
@@ -23,18 +23,18 @@ func CheckTopicAuth(ACLInfo *ACLConfig, typ int, ip, username, clientid, topic s
|
||||
return false
|
||||
}
|
||||
|
||||
func (a *AuthInfo) checkWithClientID(typ int, clientid, topic string) (bool, bool) {
|
||||
func (a *AuthInfo) checkWithClientID(action, clientid, topic string) (bool, bool) {
|
||||
auth := false
|
||||
match := false
|
||||
if a.Val == "*" || a.Val == clientid {
|
||||
for _, tp := range a.Topics {
|
||||
des := strings.Replace(tp, "%c", clientid, -1)
|
||||
if typ == PUB {
|
||||
if action == PUB {
|
||||
if pubTopicMatch(topic, des) {
|
||||
match = true
|
||||
auth = a.checkAuth(PUB)
|
||||
}
|
||||
} else if typ == SUB {
|
||||
} else if action == SUB {
|
||||
if subTopicMatch(topic, des) {
|
||||
match = true
|
||||
auth = a.checkAuth(SUB)
|
||||
@@ -45,18 +45,18 @@ func (a *AuthInfo) checkWithClientID(typ int, clientid, topic string) (bool, boo
|
||||
return match, auth
|
||||
}
|
||||
|
||||
func (a *AuthInfo) checkWithUsername(typ int, username, topic string) (bool, bool) {
|
||||
func (a *AuthInfo) checkWithUsername(action, username, topic string) (bool, bool) {
|
||||
auth := false
|
||||
match := false
|
||||
if a.Val == "*" || a.Val == username {
|
||||
for _, tp := range a.Topics {
|
||||
des := strings.Replace(tp, "%u", username, -1)
|
||||
if typ == PUB {
|
||||
if action == PUB {
|
||||
if pubTopicMatch(topic, des) {
|
||||
match = true
|
||||
auth = a.checkAuth(PUB)
|
||||
}
|
||||
} else if typ == SUB {
|
||||
} else if action == SUB {
|
||||
if subTopicMatch(topic, des) {
|
||||
match = true
|
||||
auth = a.checkAuth(SUB)
|
||||
@@ -67,18 +67,18 @@ func (a *AuthInfo) checkWithUsername(typ int, username, topic string) (bool, boo
|
||||
return match, auth
|
||||
}
|
||||
|
||||
func (a *AuthInfo) checkWithIP(typ int, ip, topic string) (bool, bool) {
|
||||
func (a *AuthInfo) checkWithIP(action, ip, topic string) (bool, bool) {
|
||||
auth := false
|
||||
match := false
|
||||
if a.Val == "*" || a.Val == ip {
|
||||
for _, tp := range a.Topics {
|
||||
des := tp
|
||||
if typ == PUB {
|
||||
if action == PUB {
|
||||
if pubTopicMatch(topic, des) {
|
||||
auth = a.checkAuth(PUB)
|
||||
match = true
|
||||
}
|
||||
} else if typ == SUB {
|
||||
} else if action == SUB {
|
||||
if subTopicMatch(topic, des) {
|
||||
auth = a.checkAuth(SUB)
|
||||
match = true
|
||||
@@ -89,15 +89,15 @@ func (a *AuthInfo) checkWithIP(typ int, ip, topic string) (bool, bool) {
|
||||
return match, auth
|
||||
}
|
||||
|
||||
func (a *AuthInfo) checkAuth(typ int) bool {
|
||||
func (a *AuthInfo) checkAuth(action string) bool {
|
||||
auth := false
|
||||
if typ == PUB {
|
||||
if action == PUB {
|
||||
if a.Auth == ALLOW && (a.PubSub == PUB || a.PubSub == PUBSUB) {
|
||||
auth = true
|
||||
} else if a.Auth == DENY && a.PubSub == SUB {
|
||||
auth = true
|
||||
}
|
||||
} else if typ == SUB {
|
||||
} else if action == SUB {
|
||||
if a.Auth == ALLOW && (a.PubSub == SUB || a.PubSub == PUBSUB) {
|
||||
auth = true
|
||||
} else if a.Auth == DENY && a.PubSub == PUB {
|
||||
@@ -5,14 +5,13 @@ import (
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
PUB = 1
|
||||
SUB = 2
|
||||
PUBSUB = 3
|
||||
SUB = "1"
|
||||
PUB = "2"
|
||||
PUBSUB = "3"
|
||||
CLIENTID = "clientid"
|
||||
USERNAME = "username"
|
||||
IP = "ip"
|
||||
@@ -24,7 +23,7 @@ type AuthInfo struct {
|
||||
Auth string
|
||||
Typ string
|
||||
Val string
|
||||
PubSub int
|
||||
PubSub string
|
||||
Topics []string
|
||||
}
|
||||
|
||||
@@ -34,21 +33,18 @@ type ACLConfig struct {
|
||||
}
|
||||
|
||||
func AclConfigLoad(file string) (*ACLConfig, error) {
|
||||
if file == "" {
|
||||
file = "./conf/acl.conf"
|
||||
}
|
||||
aclconifg := &ACLConfig{
|
||||
File: file,
|
||||
Info: make([]*AuthInfo, 0, 4),
|
||||
}
|
||||
err := aclconifg.Prase()
|
||||
err := aclconifg.Parse()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return aclconifg, err
|
||||
}
|
||||
|
||||
func (c *ACLConfig) Prase() error {
|
||||
func (c *ACLConfig) Parse() error {
|
||||
f, err := os.Open(c.File)
|
||||
defer f.Close()
|
||||
if err != nil {
|
||||
@@ -79,12 +75,16 @@ func (c *ACLConfig) Prase() error {
|
||||
parseErr = errors.New("\"" + line + "\" format is error")
|
||||
break
|
||||
}
|
||||
var pubsub int
|
||||
pubsub, err = strconv.Atoi(tmpArr[3])
|
||||
if err != nil {
|
||||
if tmpArr[3] != PUB && tmpArr[3] != SUB && tmpArr[3] != PUBSUB {
|
||||
parseErr = errors.New("\"" + line + "\" format is error")
|
||||
break
|
||||
}
|
||||
// var pubsub int
|
||||
// pubsub, err = strconv.Atoi(tmpArr[3])
|
||||
// if err != nil {
|
||||
// parseErr = errors.New("\"" + line + "\" format is error")
|
||||
// break
|
||||
// }
|
||||
topicStr := strings.Replace(tmpArr[4], " ", "", -1)
|
||||
topicStr = strings.Replace(topicStr, "\n", "", -1)
|
||||
topics := strings.Split(topicStr, ",")
|
||||
@@ -93,7 +93,7 @@ func (c *ACLConfig) Prase() error {
|
||||
Typ: tmpArr[1],
|
||||
Val: tmpArr[2],
|
||||
Topics: topics,
|
||||
PubSub: pubsub,
|
||||
PubSub: tmpArr[3],
|
||||
}
|
||||
c.Info = append(c.Info, tmpAuth)
|
||||
if err != nil {
|
||||
179
plugins/auth/authhttp/authhttp.go
Normal file
179
plugins/auth/authhttp/authhttp.go
Normal file
@@ -0,0 +1,179 @@
|
||||
package authhttp
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/fhmq/hmq/logger"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
//Config device kafka config
|
||||
type Config struct {
|
||||
AuthURL string `json:"auth"`
|
||||
ACLURL string `json:"acl"`
|
||||
SuperURL string `json:"super"`
|
||||
}
|
||||
|
||||
type authHTTP struct {
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
var (
|
||||
config Config
|
||||
log = logger.Get().Named("authhttp")
|
||||
httpClient *http.Client
|
||||
)
|
||||
|
||||
//Init init kafak client
|
||||
func Init() *authHTTP {
|
||||
content, err := ioutil.ReadFile("./plugins/auth/authhttp/http.json")
|
||||
if err != nil {
|
||||
log.Fatal("Read config file error: ", zap.Error(err))
|
||||
}
|
||||
// log.Info(string(content))
|
||||
|
||||
err = json.Unmarshal(content, &config)
|
||||
if err != nil {
|
||||
log.Fatal("Unmarshal config file error: ", zap.Error(err))
|
||||
}
|
||||
// fmt.Println("http: config: ", config)
|
||||
|
||||
httpClient = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
MaxConnsPerHost: 100,
|
||||
MaxIdleConns: 100,
|
||||
MaxIdleConnsPerHost: 100,
|
||||
},
|
||||
Timeout: time.Second * 100,
|
||||
}
|
||||
return &authHTTP{client: httpClient}
|
||||
}
|
||||
|
||||
// CheckConnect check mqtt connect
|
||||
func (a *authHTTP) CheckConnect(clientID, username, password string) bool {
|
||||
action := "connect"
|
||||
{
|
||||
aCache := checkCache(action, clientID, username, password, "")
|
||||
if aCache != nil {
|
||||
if aCache.password == password && aCache.username == username && aCache.action == action {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
data := url.Values{}
|
||||
data.Add("username", username)
|
||||
data.Add("clientid", clientID)
|
||||
data.Add("password", password)
|
||||
|
||||
req, err := http.NewRequest("POST", config.AuthURL, strings.NewReader(data.Encode()))
|
||||
if err != nil {
|
||||
log.Error("new request super: ", zap.Error(err))
|
||||
return false
|
||||
}
|
||||
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
|
||||
req.Header.Add("Content-Length", strconv.Itoa(len(data.Encode())))
|
||||
|
||||
resp, err := a.client.Do(req)
|
||||
if err != nil {
|
||||
log.Error("request super: ", zap.Error(err))
|
||||
return false
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
io.Copy(ioutil.Discard, resp.Body)
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
addCache(action, clientID, username, password, "")
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// //CheckSuper check mqtt connect
|
||||
// func CheckSuper(clientID, username, password string) bool {
|
||||
// action := "connect"
|
||||
// {
|
||||
// aCache := checkCache(action, clientID, username, password, "")
|
||||
// if aCache != nil {
|
||||
// if aCache.password == password && aCache.username == username && aCache.action == action {
|
||||
// return true
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
// data := url.Values{}
|
||||
// data.Add("username", username)
|
||||
// data.Add("clientid", clientID)
|
||||
// data.Add("password", password)
|
||||
|
||||
// req, err := http.NewRequest("POST", config.SuperURL, strings.NewReader(data.Encode()))
|
||||
// if err != nil {
|
||||
// log.Error("new request super: ", zap.Error(err))
|
||||
// return false
|
||||
// }
|
||||
// req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
|
||||
// req.Header.Add("Content-Length", strconv.Itoa(len(data.Encode())))
|
||||
|
||||
// resp, err := httpClient.Do(req)
|
||||
// if err != nil {
|
||||
// log.Error("request super: ", zap.Error(err))
|
||||
// return false
|
||||
// }
|
||||
|
||||
// defer resp.Body.Close()
|
||||
// io.Copy(ioutil.Discard, resp.Body)
|
||||
|
||||
// if resp.StatusCode == http.StatusOK {
|
||||
// return true
|
||||
// }
|
||||
// return false
|
||||
// }
|
||||
|
||||
//CheckACL check mqtt connect
|
||||
func (a *authHTTP) CheckACL(action, clientID, username, ip, topic string) bool {
|
||||
|
||||
{
|
||||
aCache := checkCache(action, "", username, "", topic)
|
||||
if aCache != nil {
|
||||
if aCache.topic == topic && aCache.action == action {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", config.ACLURL, nil)
|
||||
if err != nil {
|
||||
log.Error("get acl: ", zap.Error(err))
|
||||
return false
|
||||
}
|
||||
|
||||
data := req.URL.Query()
|
||||
|
||||
data.Add("username", username)
|
||||
data.Add("topic", topic)
|
||||
data.Add("access", action)
|
||||
req.URL.RawQuery = data.Encode()
|
||||
// fmt.Println("req:", req)
|
||||
resp, err := a.client.Do(req)
|
||||
if err != nil {
|
||||
log.Error("request acl: ", zap.Error(err))
|
||||
return false
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
io.Copy(ioutil.Discard, resp.Body)
|
||||
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
addCache(action, "", username, "", topic)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
32
plugins/auth/authhttp/cache.go
Normal file
32
plugins/auth/authhttp/cache.go
Normal file
@@ -0,0 +1,32 @@
|
||||
package authhttp
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/patrickmn/go-cache"
|
||||
)
|
||||
|
||||
type authCache struct {
|
||||
action string
|
||||
username string
|
||||
clientID string
|
||||
password string
|
||||
topic string
|
||||
}
|
||||
|
||||
var (
|
||||
// cache = make(map[string]authCache)
|
||||
c = cache.New(5*time.Minute, 10*time.Minute)
|
||||
)
|
||||
|
||||
func checkCache(action, clientID, username, password, topic string) *authCache {
|
||||
authc, found := c.Get(username)
|
||||
if found {
|
||||
return authc.(*authCache)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func addCache(action, clientID, username, password, topic string) {
|
||||
c.Set(username, &authCache{action: action, username: username, clientID: clientID, password: password, topic: topic}, cache.DefaultExpiration)
|
||||
}
|
||||
5
plugins/auth/authhttp/http.json
Normal file
5
plugins/auth/authhttp/http.json
Normal file
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"auth": "http://127.0.0.1:9090/mqtt/auth",
|
||||
"acl": "http://127.0.0.1:9090/mqtt/acl",
|
||||
"super": "http://127.0.0.1:9090/mqtt/superuser"
|
||||
}
|
||||
11
plugins/auth/mock.go
Normal file
11
plugins/auth/mock.go
Normal file
@@ -0,0 +1,11 @@
|
||||
package auth
|
||||
|
||||
type mockAuth struct{}
|
||||
|
||||
func (m *mockAuth) CheckACL(action, clientID, username, ip, topic string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *mockAuth) CheckConnect(clientID, username, password string) bool {
|
||||
return true
|
||||
}
|
||||
50
plugins/bridge/CSVLog.md
Normal file
50
plugins/bridge/CSVLog.md
Normal file
@@ -0,0 +1,50 @@
|
||||
# CSVLog Plugin For HMQ
|
||||
This is a bridge implementation for HMQ that allows messages to be logged to a CSV file at runtime.
|
||||
|
||||
It can be used for debugging/monitoring purposes, for integration with other systems/platforms, or as an audit trail of messages.
|
||||
|
||||
The plugin allows you to define 0, 1, or more filters which determine which messages get bridged. Where no filters are defined the plugin bridges every message. Where one or more filters exist, the plugin applies the filter/s and only brdiges messages that match the filter spec.
|
||||
|
||||
The plugin allows you provide a filename for the output file, and also supports three special filenames {LOG},{STDOUT}, and {NULL}. {LOG} results in messages being bridged to the log, {STDOUT} bridges them to Std out, and {NULL} simply skips and returns without an error.
|
||||
|
||||
## Configuration
|
||||
The configiration settings for CSVLog are defined by the struct csvBridgeConfig.
|
||||
```
|
||||
type csvBridgeConfig struct {
|
||||
FileName string `json:"fileName"`
|
||||
LogFileMaxSizeMB int64 `json:"logFileMaxSizeMB"`
|
||||
LogFileMaxFiles int64 `json:"logFileMaxFiles"`
|
||||
WriteIntervalSecs int64 `json:"writeIntervalSecs"`
|
||||
CommandTopic string `json:"commandTopic"`
|
||||
Filters []string `json:"filters"`
|
||||
}
|
||||
```
|
||||
| Setting | Description |
|
||||
| ----------- | ----------- |
|
||||
| FileName | A complete filename for the output file, or {LOG} to send bridged messages to the log, {STDOUT} to send bridged messages to STDOUT, or {NULL} to not bridge anything at all |
|
||||
| LogFileMaxSizeMB | The size in megabytes at which the log file is rotated |
|
||||
| LogFileMaxFiles | The maximum number of rotated logfiles to retain before they're deleted |
|
||||
| WriteIntervalSecs | The delay before flushing any pending writes to the file |
|
||||
| CommandTopic | The name of a topic to which commands relating to CSVLog will be sent eg "bridge/CSVLOG/command" |
|
||||
| Filters | An array of filter specifications which are used to determine which messages are bridged, if there are no filters specified the filter is assumed to be "#" which bridges everything. Filters are specified the same way that topic acls are described|
|
||||
|
||||
## Filters
|
||||
|
||||
Filters use the same syntax as for ACL permissions.
|
||||
|
||||
So a filter can name a specific topic..
|
||||
|
||||
"animals/cats" will bridge messages sent to the "animals/cats" topic.
|
||||
|
||||
A filter can use the + or # wildcards so
|
||||
|
||||
"animals/cats/+" will bridge messages sent to "animals/cats/breeds", "animals/cats/colours" but not "animals/cats/breeds/longhair"
|
||||
|
||||
"animals/cats/#" will bridge messages sent to "animals/cats/breeds", "animals/cats/colours", "animals/cats/breeds/longhair", etc
|
||||
|
||||
## Commands
|
||||
Currently two commands can be sent to the CSVLog bridge:
|
||||
|
||||
ROTATEFILE - Triggers an immediate rotation of the log file
|
||||
|
||||
REALOADCONFIG - Triggers a reload of the CSVLog config file
|
||||
53
plugins/bridge/bridge.go
Normal file
53
plugins/bridge/bridge.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package bridge
|
||||
|
||||
import "github.com/fhmq/hmq/logger"
|
||||
|
||||
const (
|
||||
//Connect mqtt connect
|
||||
Connect = "connect"
|
||||
//Publish mqtt publish
|
||||
Publish = "publish"
|
||||
//Subscribe mqtt sub
|
||||
Subscribe = "subscribe"
|
||||
//Unsubscribe mqtt sub
|
||||
Unsubscribe = "unsubscribe"
|
||||
//Disconnect mqtt disconenct
|
||||
Disconnect = "disconnect"
|
||||
)
|
||||
|
||||
var (
|
||||
log = logger.Get().Named("bridge")
|
||||
)
|
||||
|
||||
//Elements kafka publish elements
|
||||
type Elements struct {
|
||||
ClientID string `json:"clientid"`
|
||||
Username string `json:"username"`
|
||||
Topic string `json:"topic"`
|
||||
Payload string `json:"payload"`
|
||||
Timestamp int64 `json:"ts"`
|
||||
Size int32 `json:"size"`
|
||||
Action string `json:"action"`
|
||||
}
|
||||
|
||||
const (
|
||||
//Kafka plugin name
|
||||
Kafka = "kafka"
|
||||
CSVLog = "csvlog"
|
||||
)
|
||||
|
||||
type BridgeMQ interface {
|
||||
// Publish return true to cost the message
|
||||
Publish(e *Elements) (bool, error)
|
||||
}
|
||||
|
||||
func NewBridgeMQ(name string) BridgeMQ {
|
||||
switch name {
|
||||
case Kafka:
|
||||
return InitKafka()
|
||||
case CSVLog:
|
||||
return InitCSVLog()
|
||||
default:
|
||||
return &mockMQ{}
|
||||
}
|
||||
}
|
||||
414
plugins/bridge/csvlog.go
Normal file
414
plugins/bridge/csvlog.go
Normal file
@@ -0,0 +1,414 @@
|
||||
package bridge
|
||||
|
||||
/*
|
||||
Copyright (c) 2021, Gary Barnett @thinkovation. Released under the Apache 2 License
|
||||
|
||||
CSVLog is a bridge plugin for HMQ that implements CSV logging of messages. See CSVLog.md for more information
|
||||
|
||||
*/
|
||||
|
||||
import (
|
||||
"encoding/csv"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type csvBridgeConfig struct {
|
||||
FileName string `json:"fileName"`
|
||||
LogFileMaxSizeMB int64 `json:"logFileMaxSizeMB"`
|
||||
LogFileMaxFiles int64 `json:"logFileMaxFiles"`
|
||||
WriteIntervalSecs int64 `json:"writeIntervalSecs"`
|
||||
CommandTopic string `json:"commandTopic"`
|
||||
Filters []string `json:"filters"`
|
||||
}
|
||||
|
||||
type csvLog struct {
|
||||
config csvBridgeConfig
|
||||
buffer []string
|
||||
msgchan chan (*Elements)
|
||||
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
// rotateLog performs a log rotation - copying the current logfile to the base file name plus a timestamp
|
||||
func (c *csvLog) rotateLog(withPrune bool) error {
|
||||
c.Lock()
|
||||
filename := c.config.FileName
|
||||
c.Unlock()
|
||||
|
||||
basename := strings.TrimSuffix(filename, filepath.Ext(filename))
|
||||
newpath := basename + time.Now().Format("-20060102T150405") + filepath.Ext(filename)
|
||||
renameError := os.Rename(filename, newpath)
|
||||
if renameError != nil {
|
||||
return renameError
|
||||
}
|
||||
outfile, _ := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
outfile.Close()
|
||||
// Whenever we rotate a logfile we prune
|
||||
if withPrune {
|
||||
c.logFilePrune()
|
||||
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// writeToLog takes an array of elements and writes them to the logfile (or to log or stdout) spefified in
|
||||
// the configuration
|
||||
func (c *csvLog) writeToLog(els []Elements) error {
|
||||
|
||||
c.RLock()
|
||||
fname := c.config.FileName
|
||||
c.RUnlock()
|
||||
if fname == "" {
|
||||
fname = "CSVLOG.CSV"
|
||||
}
|
||||
|
||||
if fname == "{LOG}" {
|
||||
for _, value := range els {
|
||||
t := time.Unix(value.Timestamp, 0)
|
||||
log.Info(t.Format("2006-01-02T15:04:05") + " " + value.ClientID + " " + value.Username + " " + value.Action + " " + value.Topic + " " + value.Payload)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if fname == "{STDOUT}" {
|
||||
for _, value := range els {
|
||||
t := time.Unix(value.Timestamp, 0)
|
||||
fmt.Println(t.Format("2006-01-02T15:04:05") + " " + value.ClientID + " " + value.Username + " " + value.Action + " " + value.Topic + " " + value.Payload)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var mbsize int64
|
||||
fileStat, fileStatErr := os.Stat(fname)
|
||||
if fileStatErr != nil {
|
||||
log.Warn("Could not get CSVLog info. Received Err " + fileStatErr.Error())
|
||||
mbsize = 0
|
||||
} else {
|
||||
mbsize = fileStat.Size() / 1024 / 1024
|
||||
}
|
||||
if mbsize > c.config.LogFileMaxSizeMB && c.config.LogFileMaxSizeMB != 0 {
|
||||
rotateErr := c.rotateLog(true)
|
||||
if rotateErr != nil {
|
||||
log.Warn("Unable to rotate outputfile")
|
||||
}
|
||||
}
|
||||
outfile, outfileOpenError := os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
defer outfile.Close()
|
||||
if outfileOpenError != nil {
|
||||
log.Warn("Could not open CSV Log file to write")
|
||||
return errors.New("Could not write to CSV Log File")
|
||||
}
|
||||
|
||||
writer := csv.NewWriter(outfile)
|
||||
defer writer.Flush()
|
||||
|
||||
for _, value := range els {
|
||||
t := time.Unix(value.Timestamp, 0)
|
||||
var outrow = []string{t.Format("2006-01-02T15:04:05"), value.ClientID, value.Username, value.Action, value.Topic, value.Payload}
|
||||
writeOutRowError := writer.Write(outrow)
|
||||
if writeOutRowError != nil {
|
||||
log.Warn("Could not write msg to CSV Log")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// Worker should be invoked as a goroutine - It listens on the csvlog message channel for incoming messages
|
||||
// for performance we batch messages into an outqueue and write them in bulk when a timer expires
|
||||
func (c *csvLog) Worker() {
|
||||
log.Info("Running CSVLog worker")
|
||||
var outqueue []Elements
|
||||
|
||||
for true {
|
||||
c.RLock()
|
||||
waitInterval := c.config.WriteIntervalSecs
|
||||
c.RUnlock()
|
||||
|
||||
timer := time.NewTimer(time.Second * time.Duration(waitInterval))
|
||||
|
||||
select {
|
||||
|
||||
case p := <-c.msgchan:
|
||||
c.RLock()
|
||||
|
||||
oktopublish := false
|
||||
|
||||
// Check to see if any filters are defined. If there are none we assume we're logging everything
|
||||
if len(c.config.Filters) != 0 {
|
||||
// We pick up a Read lock here to parse the c.config.Filters string array
|
||||
// as it's a read lock, and write locks will be rare
|
||||
// it feels as if this will be fine.
|
||||
// If there is contention, it _might_ make sense to quickly lock c, get
|
||||
// the filters and release the lock, then process the filters with no locks
|
||||
// but I think it's unlikely
|
||||
|
||||
for _, filt := range c.config.Filters {
|
||||
if topicMatch(p.Topic, filt) {
|
||||
oktopublish = true
|
||||
break
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
} else {
|
||||
oktopublish = true
|
||||
}
|
||||
if oktopublish {
|
||||
var el Elements
|
||||
el.Action = p.Action
|
||||
el.ClientID = p.ClientID
|
||||
el.Payload = p.Payload
|
||||
el.Size = p.Size
|
||||
el.Timestamp = p.Timestamp
|
||||
el.Topic = p.Topic
|
||||
el.Username = p.Username
|
||||
outqueue = append(outqueue, el)
|
||||
}
|
||||
c.RUnlock()
|
||||
break
|
||||
case <-timer.C:
|
||||
if len(outqueue) > 0 {
|
||||
writeResult := c.writeToLog(outqueue)
|
||||
if writeResult != nil {
|
||||
log.Warn("Trouble writing to CSV Log")
|
||||
}
|
||||
outqueue = nil
|
||||
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// LoadCSVLogConfig loads the configuration file - it currently looks in
|
||||
// "./plugins/csvlog/csvlogconfig.json" (following the example of the default location of the kafka plugin config file)
|
||||
// if it doesn't find it there it looks in two further places - the current directory and
|
||||
// an "assets" folder under the current directory (This is for compatibility with a couple of deployed)
|
||||
// implementations.
|
||||
func LoadCSVLogConfig() csvBridgeConfig {
|
||||
// Check to see if the CSVLOGCONFFILE environment variable is set and if so
|
||||
// check that it does actually point to a file
|
||||
csvLogConfigFile := os.Getenv("CSVLOGCONFFILE")
|
||||
if csvLogConfigFile != "" {
|
||||
if _, err := os.Stat(csvLogConfigFile); os.IsNotExist(err) {
|
||||
csvLogConfigFile = ""
|
||||
}
|
||||
}
|
||||
// If csvLogConfigFile is blank look in the plugins directory,
|
||||
// then the current directory for the csvLogConfigFile. If it's still not found we use a default config
|
||||
// If the file does not exist, we use default parameters
|
||||
if csvLogConfigFile == "" {
|
||||
csvLogConfigFile = "./plugins/csvlog/csvlogconfig.json"
|
||||
}
|
||||
if _, err := os.Stat(csvLogConfigFile); os.IsNotExist(err) {
|
||||
|
||||
if _, err := os.Stat("csvlogconfig.json"); os.IsNotExist(err) {
|
||||
csvLogConfigFile = ""
|
||||
} else {
|
||||
csvLogConfigFile = "csvlogconfig.json"
|
||||
}
|
||||
}
|
||||
|
||||
var configUnmarshalErr error
|
||||
var config csvBridgeConfig
|
||||
if csvLogConfigFile != "" {
|
||||
log.Info("Trying to load config file from " + csvLogConfigFile)
|
||||
content, err := ioutil.ReadFile(csvLogConfigFile)
|
||||
if err != nil {
|
||||
log.Info("Read config file error: ", zap.Error(err))
|
||||
}
|
||||
configUnmarshalErr = json.Unmarshal(content, &config)
|
||||
}
|
||||
|
||||
if configUnmarshalErr != nil || config.FileName == "" {
|
||||
log.Warn("Unable to load csvlog config file, so using default settings")
|
||||
config.FileName = "/var/log/csvlog.log"
|
||||
config.CommandTopic = "CSVLOG/command"
|
||||
config.WriteIntervalSecs = 10
|
||||
config.LogFileMaxSizeMB = 1
|
||||
config.LogFileMaxFiles = 4
|
||||
|
||||
}
|
||||
return config
|
||||
|
||||
}
|
||||
|
||||
// InitCSVLog initialises a CSVLOG plugin
|
||||
// It does this by loading a config file if one can be found. The default filename follows the same
|
||||
// convention as the kafka plugin - ie it's in "./plugins/csvlog/csvlogconfig.json" but an
|
||||
// environment var - CSVLOGCONFFILE - can be set to provide a different location.
|
||||
//
|
||||
// Once the config is set the worker is started
|
||||
func InitCSVLog() *csvLog {
|
||||
log.Info("Trying to init CSVLOG")
|
||||
|
||||
c := &csvLog{config: LoadCSVLogConfig()}
|
||||
c.msgchan = make(chan *Elements, 200)
|
||||
//Start the csvlog worker
|
||||
go c.Worker()
|
||||
return c
|
||||
|
||||
}
|
||||
|
||||
// topicMatch accepts a topic name and a filter string, it then evaluates the
|
||||
// topic against the filter string and returns true if there is a match.
|
||||
//
|
||||
// The CSV bridge can be configured with 0, 1 or more filters - Where there are no
|
||||
// filters specified, every message will be re-published. Where there are filters, any message
|
||||
// that passes any of the filter tests will be re-published.
|
||||
func topicMatch(topic string, filter string) bool {
|
||||
if topic == filter || filter == "#" {
|
||||
return true
|
||||
}
|
||||
topicComponents := strings.Split(topic, "/")
|
||||
filterComponents := strings.Split(filter, "/")
|
||||
currentpos := 0
|
||||
filterComponentsLength := len(filterComponents)
|
||||
currentFilterComponent := ""
|
||||
if filterComponentsLength > 0 {
|
||||
currentFilterComponent = filterComponents[currentpos]
|
||||
}
|
||||
for _, topicVal := range topicComponents {
|
||||
if currentFilterComponent == "" {
|
||||
return false
|
||||
}
|
||||
if currentFilterComponent == "#" {
|
||||
return true
|
||||
}
|
||||
if currentFilterComponent != "+" && currentFilterComponent != topicVal {
|
||||
return false
|
||||
}
|
||||
currentpos++
|
||||
if filterComponentsLength > currentpos {
|
||||
currentFilterComponent = filterComponents[currentpos]
|
||||
} else {
|
||||
currentFilterComponent = ""
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// logFilePrune checks the number of rotated logfiles and prunes them
|
||||
func (c *csvLog) logFilePrune() error {
|
||||
|
||||
// List the rotated files
|
||||
c.RLock()
|
||||
filename := c.config.FileName
|
||||
maxfiles := c.config.LogFileMaxFiles
|
||||
c.RUnlock()
|
||||
if maxfiles == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
fileExt := filepath.Ext(filename)
|
||||
fileDir := filepath.Dir(filename)
|
||||
baseFileName := strings.TrimSuffix(filepath.Base(filename), fileExt)
|
||||
|
||||
files, err := ioutil.ReadDir(fileDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var foundFiles []string
|
||||
for _, file := range files {
|
||||
if strings.HasPrefix(file.Name(), baseFileName+"-") {
|
||||
|
||||
foundFiles = append(foundFiles, file.Name())
|
||||
|
||||
}
|
||||
}
|
||||
if len(foundFiles) >= int(maxfiles) {
|
||||
fmt.Println("Found ", len(foundFiles), " files")
|
||||
sort.Strings(foundFiles)
|
||||
for i := 0; i < len(foundFiles)-int(maxfiles); i++ {
|
||||
fileDeleteError := os.Remove(fileDir + "//" + foundFiles[i])
|
||||
log.Info("Pruning logfile " + fileDir + "//" + foundFiles[i])
|
||||
if fileDeleteError != nil {
|
||||
log.Warn("Could not delete file " + fileDir + "//" + foundFiles[i])
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// Publish implements the bridge interface - it accepts an Element then checks to see if that element is a
|
||||
// message published to the admin topic for the plugin
|
||||
//
|
||||
func (c *csvLog) Publish(e *Elements) (bool, error) {
|
||||
// A short-lived lock on c allows us to
|
||||
// get the Command topic then release the lock
|
||||
// This then allows us to process the command - which may
|
||||
// take its a write lock on c (to update values) and then
|
||||
// return here where we'll pick up a
|
||||
// read lock to iterate over the c.config.filters
|
||||
// We're trying to minimise the time spent in this function
|
||||
// and to limit the overall time spent in any write locks.
|
||||
c.RLock()
|
||||
//CSVLOG allows you to configure a CommandTopic which is a topic to which commands affecting the behaviour of CSVLog can be sent
|
||||
//The simplest would be a message with a payload of "RELOAD" which will reload the configuration allowing configuration changes to be
|
||||
//made at runtime without restarting the broker
|
||||
CommandTopic := c.config.CommandTopic
|
||||
OutFile := c.config.FileName
|
||||
c.RUnlock()
|
||||
// If the outfile is set to "{NULL}" we don't do anything with the message - we just return nil
|
||||
// This feature is here to allow CSVLOG to be enabled/disabled at runtime
|
||||
if OutFile == "{NULL}" {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if e.Topic == CommandTopic {
|
||||
|
||||
log.Info("CSVLOG Command Received")
|
||||
|
||||
// Process Command
|
||||
// These are going to be rare ocurrences, so in this implementation
|
||||
// we will process the command here - but if we _really_ want to
|
||||
// squeeze delays out, we could have a worker sitting on a
|
||||
// command channel processing any commands.
|
||||
if e.Payload == "RELOADCONFIG" {
|
||||
newConfig := LoadCSVLogConfig()
|
||||
c.Lock()
|
||||
c.config = newConfig
|
||||
c.Unlock()
|
||||
|
||||
}
|
||||
if e.Payload == "ROTATEFILE" {
|
||||
|
||||
c.rotateLog(true)
|
||||
|
||||
}
|
||||
if e.Payload == "ROTATEFILENOPRUNE" {
|
||||
|
||||
c.rotateLog(false)
|
||||
|
||||
}
|
||||
// We could return without doing anything more here, but
|
||||
// for now we move ahead with the filter processing on the
|
||||
// basis that unless we either filter for "all" (with #) or
|
||||
// filter for the CommandTopic, they won't be logged - but we
|
||||
// may have a reason for wanting to track commands too
|
||||
}
|
||||
// Push the message into the channel and return
|
||||
// the channel is buffered and is read by a goroutine so this should block for the shortest possible time
|
||||
c.msgchan <- e
|
||||
return false, nil
|
||||
}
|
||||
36
plugins/bridge/csvlog_test.go
Normal file
36
plugins/bridge/csvlog_test.go
Normal file
@@ -0,0 +1,36 @@
|
||||
package bridge
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
//Test_topicMatch is here to double check the topic matching logic
|
||||
func Test_topicMatch(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
topic string
|
||||
filter string
|
||||
want bool
|
||||
}{
|
||||
// Some sample test cases
|
||||
{name: "Simple", topic: "test", filter: "test", want: true},
|
||||
{name: "Simple", topic: "test/cat", filter: "test/+", want: true},
|
||||
{name: "Simple", topic: "test/cat/breed", filter: "test/+", want: false},
|
||||
{name: "Simple", topic: "test/cat", filter: "test/#", want: true},
|
||||
{name: "Simple", topic: "test/cat/banana", filter: "test/#", want: true},
|
||||
{name: "Simple", topic: "test/cat/banana", filter: "test/+", want: false},
|
||||
{name: "Simple", topic: "test/dog/banana", filter: "test/cat/+", want: false},
|
||||
{name: "Simple", topic: "test/cat/banana", filter: "test/+/banana", want: true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
fmt.Println(tt)
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := topicMatch(tt.topic, tt.filter); got != tt.want {
|
||||
t.Errorf("topicMatch() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
156
plugins/bridge/kafka.go
Normal file
156
plugins/bridge/kafka.go
Normal file
@@ -0,0 +1,156 @@
|
||||
package bridge
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Shopify/sarama"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type kafkaConfig struct {
|
||||
Addr []string `json:"addr"`
|
||||
ConnectTopic string `json:"onConnect"`
|
||||
SubscribeTopic string `json:"onSubscribe"`
|
||||
PublishTopic string `json:"onPublish"`
|
||||
UnsubscribeTopic string `json:"onUnsubscribe"`
|
||||
DisconnectTopic string `json:"onDisconnect"`
|
||||
DeliverMap map[string]string `json:"deliverMap"`
|
||||
}
|
||||
|
||||
type kafka struct {
|
||||
kafkaConfig kafkaConfig
|
||||
kafkaClient sarama.AsyncProducer
|
||||
}
|
||||
|
||||
// InitKafka Init kafka client
|
||||
func InitKafka() *kafka {
|
||||
log.Info("start connect kafka....")
|
||||
content, err := ioutil.ReadFile("./plugins/kafka/kafka.json")
|
||||
if err != nil {
|
||||
log.Fatal("Read config file error: ", zap.Error(err))
|
||||
}
|
||||
// log.Info(string(content))
|
||||
var config kafkaConfig
|
||||
err = json.Unmarshal(content, &config)
|
||||
if err != nil {
|
||||
log.Fatal("Unmarshal config file error: ", zap.Error(err))
|
||||
}
|
||||
c := &kafka{kafkaConfig: config}
|
||||
c.connect()
|
||||
return c
|
||||
}
|
||||
|
||||
//connect
|
||||
func (k *kafka) connect() {
|
||||
conf := sarama.NewConfig()
|
||||
conf.Version = sarama.V1_1_1_0
|
||||
kafkaClient, err := sarama.NewAsyncProducer(k.kafkaConfig.Addr, conf)
|
||||
if err != nil {
|
||||
log.Fatal("create kafka async producer failed: ", zap.Error(err))
|
||||
}
|
||||
|
||||
go func() {
|
||||
for err := range kafkaClient.Errors() {
|
||||
log.Error("send msg to kafka failed: ", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
k.kafkaClient = kafkaClient
|
||||
}
|
||||
|
||||
//Publish publish to kafka
|
||||
func (k *kafka) Publish(e *Elements) (bool, error) {
|
||||
config := k.kafkaConfig
|
||||
key := e.ClientID
|
||||
topics := make(map[string]bool)
|
||||
switch e.Action {
|
||||
case Connect:
|
||||
if config.ConnectTopic != "" {
|
||||
topics[config.ConnectTopic] = true
|
||||
}
|
||||
case Publish:
|
||||
if config.PublishTopic != "" {
|
||||
topics[config.PublishTopic] = true
|
||||
}
|
||||
// foreach regexp map config
|
||||
for reg, topic := range config.DeliverMap {
|
||||
match := matchTopic(reg, e.Topic)
|
||||
if match {
|
||||
topics[topic] = true
|
||||
}
|
||||
}
|
||||
case Subscribe:
|
||||
if config.SubscribeTopic != "" {
|
||||
topics[config.SubscribeTopic] = true
|
||||
}
|
||||
case Unsubscribe:
|
||||
if config.UnsubscribeTopic != "" {
|
||||
topics[config.UnsubscribeTopic] = true
|
||||
}
|
||||
case Disconnect:
|
||||
if config.DisconnectTopic != "" {
|
||||
topics[config.DisconnectTopic] = true
|
||||
}
|
||||
default:
|
||||
return false, errors.New("error action: " + e.Action)
|
||||
}
|
||||
|
||||
return false, k.publish(topics, key, e)
|
||||
|
||||
}
|
||||
|
||||
func (k *kafka) publish(topics map[string]bool, key string, msg *Elements) error {
|
||||
payload, err := json.Marshal(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for topic, _ := range topics {
|
||||
select {
|
||||
case k.kafkaClient.Input() <- &sarama.ProducerMessage{
|
||||
Topic: topic,
|
||||
Key: sarama.ByteEncoder(key),
|
||||
Value: sarama.ByteEncoder(payload),
|
||||
}:
|
||||
continue
|
||||
case <-time.After(5 * time.Second):
|
||||
return errors.New("write kafka timeout")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// match reports whether the topic levels satisfy the filter levels in
// subTopic. "+" matches exactly one level, "#" matches the remainder
// of the topic (including zero levels); all other levels must compare
// equal. Both slices must be fully consumed for a non-# match.
func match(subTopic []string, topic []string) bool {
	for len(subTopic) > 0 {
		// A multi-level wildcard swallows whatever is left.
		if subTopic[0] == "#" {
			return true
		}
		// Filter still has levels but the topic is exhausted.
		if len(topic) == 0 {
			return false
		}
		// Literal levels must be identical; "+" accepts any one level.
		if subTopic[0] != "+" && subTopic[0] != topic[0] {
			return false
		}
		subTopic, topic = subTopic[1:], topic[1:]
	}
	// Filter exhausted: match only if the topic is too.
	return len(topic) == 0
}
|
||||
|
||||
func matchTopic(subTopic string, topic string) bool {
|
||||
return match(strings.Split(subTopic, "/"), strings.Split(topic, "/"))
|
||||
}
|
||||
14
plugins/bridge/kafka/kafka.json
Normal file
14
plugins/bridge/kafka/kafka.json
Normal file
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"addr": [
|
||||
"127.0.0.1:9090"
|
||||
],
|
||||
"onConnect": "onConnect",
|
||||
"onPublish": "onPublish",
|
||||
"onSubscribe": "onSubscribe",
|
||||
"onDisconnect": "onDisconnect",
|
||||
"onUnsubscribe": "onUnsubscribe",
|
||||
"deliverMap": {
|
||||
"#": "publish",
|
||||
"/upload/+/#": "upload"
|
||||
}
|
||||
}
|
||||
7
plugins/bridge/mock.go
Normal file
7
plugins/bridge/mock.go
Normal file
@@ -0,0 +1,7 @@
|
||||
package bridge
|
||||
|
||||
// mockMQ is a no-op bridge used when no real message queue is
// configured.
type mockMQ struct{}

// Publish implements the bridge interface; it discards the event and
// always reports success.
func (m *mockMQ) Publish(e *Elements) (bool, error) {
	return false, nil
}
|
||||
56
pool/fixpool.go
Normal file
56
pool/fixpool.go
Normal file
@@ -0,0 +1,56 @@
|
||||
package pool
|
||||
|
||||
import (
|
||||
"github.com/cespare/xxhash/v2"
|
||||
)
|
||||
|
||||
// WorkerPool runs submitted tasks on a fixed set of worker goroutines.
// Tasks sharing a uid hash onto the same worker queue, so they execute
// sequentially in submission order (see Submit).
type WorkerPool struct {
	// maxWorkers is the number of worker goroutines / task queues.
	maxWorkers int
	// taskQueue holds one buffered channel per worker (see dispatch).
	taskQueue []chan func()
	// stoppedChan is created by New but not used anywhere in this file;
	// NOTE(review): presumably reserved for a future Stop — confirm.
	stoppedChan chan struct{}
}
|
||||
|
||||
func New(maxWorkers int) *WorkerPool {
|
||||
// There must be at least one worker.
|
||||
if maxWorkers < 1 {
|
||||
maxWorkers = 1
|
||||
}
|
||||
|
||||
// taskQueue is unbuffered since items are always removed immediately.
|
||||
pool := &WorkerPool{
|
||||
taskQueue: make([]chan func(), maxWorkers),
|
||||
maxWorkers: maxWorkers,
|
||||
stoppedChan: make(chan struct{}),
|
||||
}
|
||||
// Start the task dispatcher.
|
||||
pool.dispatch()
|
||||
|
||||
return pool
|
||||
}
|
||||
|
||||
func (p *WorkerPool) Submit(uid string, task func()) {
|
||||
idx := xxhash.Sum64([]byte(uid)) % uint64(p.maxWorkers)
|
||||
if task != nil {
|
||||
p.taskQueue[idx] <- task
|
||||
}
|
||||
}
|
||||
|
||||
func (p *WorkerPool) dispatch() {
|
||||
for i := 0; i < p.maxWorkers; i++ {
|
||||
p.taskQueue[i] = make(chan func(), 1024)
|
||||
go startWorker(p.taskQueue[i])
|
||||
}
|
||||
}
|
||||
|
||||
// startWorker runs tasks received on taskChan, in order, until the
// channel is closed.
func startWorker(taskChan chan func()) {
	// Ranging over the channel exits cleanly when it is closed.
	for task := range taskChan {
		// Execute the task.
		task()
	}
}
|
||||
Reference in New Issue
Block a user