Compare commits


496 Commits

Author SHA1 Message Date
Cameron Sparr
19e5d975ca Updating CHANGELOG and README for version 0.2.1 2015-11-16 16:23:50 -07:00
Cameron Sparr
5664625f67 Update README, CHANGELOG, and unit tests with list output 2015-11-16 10:43:03 -07:00
Daniel Malon
375045953f FreeBSD compatibility
- Use gopsutil instead of gosigar
- Bump go-dockerclient

closes #372
2015-11-16 10:32:58 -07:00
Cameron Sparr
b10b186cc8 Allow users to specify outputs as lists
This provides the ability to specify multiple outputs of a single
output type.

In essence, allowing this:

[outputs]

[[outputs.influxdb]]
  urls = ["udp://localhost:8089"]
  database = "udp-telegraf"

[[outputs.influxdb]]
  urls = ["http://myhost:8086"]
  database = "telegraf"

[[outputs.kafka]]
  brokers = ["192.168.99.100:9092"]
  topic = "telegraf"

closes #335
2015-11-16 10:01:28 -07:00
Cameron Sparr
bf8e0f4cae CHANGELOG update 2015-11-13 14:42:21 -07:00
Cameron Sparr
a6ae597dfc MQTT output unit tests w/ docker container 2015-11-13 13:42:06 -07:00
Cameron Sparr
b975419bc7 Apache plugin unit tests and README 2015-11-13 13:01:00 -07:00
Cameron Sparr
0f036d6bec InfluxDB output: add tests and a README 2015-11-13 10:42:35 -07:00
Codeb Fan
20fbfc7006 Twemproxy go fmt and bug fixups, CHANGELOG, README
closes #365
2015-11-13 09:43:48 -07:00
Codeb Fan
e167b72b16 Add plugin for Twemproxy
This plugin collects data from Twemproxy's stats interface
2015-11-13 09:40:29 -07:00
Cameron Sparr
68ef07bff6 Update CHANGELOG with UDP output 2015-11-12 16:02:46 -07:00
Cameron Sparr
10a20e208a Godep update and dependency resolution 2015-11-12 15:20:01 -07:00
Cameron Sparr
e10394ba3b Use the UDP client for writing to InfluxDB 2015-11-12 14:52:35 -07:00
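For reference, a minimal Go sketch of what writing to InfluxDB over UDP looks like; the import path and config fields follow the InfluxDB client/v2 API of that era and are assumptions here, not this commit's actual code:

package main

import (
	"log"
	"time"

	client "github.com/influxdb/influxdb/client/v2"
)

func main() {
	// UDP needs only a target address; writes are fire-and-forget, so
	// most network errors are invisible to the sender.
	c, err := client.NewUDPClient(client.UDPConfig{Addr: "localhost:8089"})
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	bp, _ := client.NewBatchPoints(client.BatchPointsConfig{Precision: "s"})
	pt, _ := client.NewPoint("cpu",
		map[string]string{"host": "example"},
		map[string]interface{}{"usage_idle": 98.2},
		time.Now())
	bp.AddPoint(pt)

	if err := c.Write(bp); err != nil {
		log.Println(err)
	}
}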
鲁晓敏
019585f0db phpfpm: add socket fcgi support 2015-11-12 10:44:51 -07:00
鲁晓敏
e619845ffe measurement name should have prefix before ShouldPass check 2015-11-12 10:43:50 -07:00
Cameron Sparr
3012928452 Fix config file tab indentation 2015-11-12 09:52:35 -07:00
Cameron Sparr
352ccde52b Fix new error return of client.NewPoint 2015-11-11 15:38:22 -07:00
Cameron Sparr
92fb51026a Godep update: gopsutil 2015-11-11 15:38:22 -07:00
Cameron Sparr
acf9c1141a Change duration -> internal and implement private gopsutil methods 2015-11-11 15:38:22 -07:00
Cameron Sparr
a8bcc51071 Godep update: influxdb 2015-11-11 15:38:22 -07:00
Cameron Sparr
dcd1c6766c Godep save: gopsutil 2015-11-11 15:38:22 -07:00
Cameron Sparr
00ee2529bc Revert "redis: support IPv6 addresses with no port"
This reverts commit 2af97cdbcb.
2015-11-11 15:33:58 -07:00
Nicholas Katsaros
2af97cdbcb redis: support IPv6 addresses with no port
closes #356
2015-11-10 10:02:42 -07:00
martinrusev
1accab02ed Amon output
closes #350
2015-11-09 10:44:28 -07:00
Roman Statsevich
1a05899be0 removed "panic" from zfs plugin
also added zfs plugin to README.md

closes #341
2015-11-09 10:38:33 -07:00
Roman Statsevich
d54f6be639 add ZFS plugin 2015-11-09 10:37:36 -07:00
Subhachandra Chandra
00614026b3 Added parameters "Devices" and "SkipSerialNumber" to the DiskIO plugin.
"Devices" can be used to specify storage devices on which stats
should be reported. "SkipSerialNumber" can be used to omit
the device serial number.

Added tests to verify the new parameters.

closes #344
2015-11-06 17:11:57 -07:00
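A hedged Go sketch of the filtering these parameters imply; the identifiers are illustrative, not the plugin's actual names:

package main

import "fmt"

// shouldGather reports whether a device is selected; an empty Devices
// list means "report on everything", matching the described default.
func shouldGather(device string, devices []string) bool {
	if len(devices) == 0 {
		return true
	}
	for _, d := range devices {
		if d == device {
			return true
		}
	}
	return false
}

// tagsFor omits the serial-number tag when SkipSerialNumber is set.
func tagsFor(device, serial string, skipSerialNumber bool) map[string]string {
	tags := map[string]string{"name": device}
	if !skipSerialNumber && serial != "" {
		tags["serial"] = serial
	}
	return tags
}

func main() {
	fmt.Println(shouldGather("sda", []string{"sda", "sdb"})) // true
	fmt.Println(tagsFor("sda", "WD-123", true))              // serial omitted
}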
saiello
acf1da4d30 Added jolokia README.md
closes #337
2015-11-06 14:08:07 -07:00
saiello
921ffb7bdb Test for jolokia plugin 2015-11-06 14:07:02 -07:00
saiello
b2e22cbc59 Add fields value test methods 2015-11-06 14:07:02 -07:00
saiello
55c598f9ff Create a JolokiaClient, allowing a stub implementation to be injected 2015-11-06 14:07:02 -07:00
saiello
eabc0875de Fixed sampleconfig 2015-11-06 14:07:02 -07:00
saiello
62270a3697 go fmt run over jolokia.go 2015-11-06 14:07:02 -07:00
saiello
40d8aeecb0 Use url.Parse to validate configuration params 2015-11-06 14:07:02 -07:00
saiello
2daa9ff260 Added Tags as toml field 2015-11-06 14:07:02 -07:00
Simone Aiello
25fd4297a8 Jolokia plugin first commit 2015-11-06 14:07:02 -07:00
cornerot
f05d89ed72 removed "panic" from bcache plugin
closes #343
2015-11-06 14:05:09 -07:00
Sean Beckett
4c2501be95 updating Golang crypto 2015-11-04 15:30:01 -08:00
Cameron Sparr
e2854232d0 Change HAProxy plugin tag from host to server
fixes #342
2015-11-03 11:21:58 -07:00
Cameron Sparr
6794fd06eb Suggest running as telegraf user in test mode in README
Fixes #330
2015-11-03 11:18:57 -07:00
Cameron Sparr
befc906167 Improve the HTTP JSON plugin README with more examples. 2015-11-03 10:16:59 -07:00
Cameron Sparr
422d240afb Mongodb should output 2 plugins in test mode
closes #336
2015-11-02 17:23:40 -07:00
Cameron Sparr
2b966b40f2 Completely tab-indent the Makefile 2015-11-02 14:32:06 -07:00
Sean Reifschneider
a992e16f7d On a package upgrade, restart telegraf.
closes #338
2015-11-02 13:01:58 -07:00
Cameron Sparr
0398dc1226 Don't overwrite 'host' tag in redis plugin
fixes #331
2015-11-02 11:30:49 -07:00
Eugene Dementiev
5592738603 [rabbitmq plugin] Add support for per-queue metrics
Also metrics now are gathered concurrently across servers. Fixes #185

fixes #185
closes #334
2015-11-02 11:13:24 -07:00
Eugene Dementiev
688ffd024b [amqp output] Add ability to specify influxdb database
and retention policy, as well as precision as amqp headers

closes #333
2015-11-02 11:12:09 -07:00
JP
4ac1c819e0 add elasticsearch README
closes #327
2015-11-02 11:04:43 -07:00
JP
a6e0ae2896 add ValidateTaggedFields func to testutil accumulator 2015-11-02 11:03:41 -07:00
JP
cb8499c264 optionally gather cluster and index health stats 2015-11-02 11:03:41 -07:00
Cameron Sparr
d2fb065d0d Prometheus client test refactor
closes #318
2015-10-28 16:25:15 -06:00
Tait Clarridge
4449f7f2fb Add prometheus_client service output module, update prometheus client
- Adds a client implementation using the prometheus go_client library
  that exposes metrics.

- Adds a new type of output "ServiceOutput" which follows inline with
  the "ServicePlugin", adding a Stop and Start method for the service

This change also requires the newer prometheus/client_golang code, so
the prometheus plugin needed to be changed.

Added the following to Godep:
    - bitbucket.org/ww/goautoneg (in github.com/common/expfmt/encode.go)
    - prometheus/common/expfmt (in plugins/prometheus.go)
    - github.com/prometheus/common/model (in plugins/prometheus.go)
    - github.com/prometheus/procfs (in github.com/client_golang/prometheus)
    - github.com/beorn7/perks/quantile (in github.com/client_golang/prometheus)

X-Github-Meta: closes #306
2015-10-28 15:28:39 -06:00
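A minimal sketch of the interface split this commit describes; the method signatures are assumptions based on the commit message, not the repository's exact definitions:

package main

// Point stands in for the influxdb client's point type that outputs receive.
type Point struct {
	Name   string
	Fields map[string]interface{}
}

// Output is the plain flush-driven output interface.
type Output interface {
	Connect() error
	Close() error
	Write(points []Point) error
}

// ServiceOutput adds Start/Stop so an output can keep something running
// in the background, e.g. an HTTP endpoint for Prometheus to scrape.
type ServiceOutput interface {
	Output
	Start() error
	Stop()
}

func main() {}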
JP
7cc60dfb8f update mongostat from github.com/mongodb/mongo-tools
closes #323
2015-10-28 15:26:04 -06:00
Cameron Sparr
028bae8f04 Run make in circle, don't build arm and 32-bit 2015-10-28 12:30:58 -06:00
Cameron Sparr
fa9555c430 Execute "long" unit tests using docker containers
fixes #293
2015-10-28 11:45:04 -06:00
Cameron Sparr
48d11f0a5c Mongostat diff bug: change less-or-equal to less-than 2015-10-28 10:44:09 -06:00
Cameron Sparr
09a0c3b40f Update README & CHANGELOG with docker and NSQ changes 2015-10-27 15:47:27 -06:00
Jonathan Cross
e622bd5e7f fixing test for NoError
closes #325
2015-10-27 15:44:22 -06:00
Jonathan Cross
0d31f40e16 use index 0 of server array for nsq test 2015-10-27 15:44:22 -06:00
Jonathan Cross
e13500fc4f updated for new output Write function
removed HTTP listener port in docker compose. Not being used by plugin.
2015-10-27 15:44:22 -06:00
Jonathan Cross
2a76942a74 NSQ Output plugin
NSQ output plugin. Following the NSQ methodology, the output is a producer
to one instance of NSQD. The Go library does not accept array values by
default for a Producer, and service discovery is generally done on the
consumer side.

Follows same methodology as Kafka Output without the tag reference.
2015-10-27 15:44:22 -06:00
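For context, a sketch of a single-nsqd producer with go-nsq, which shows why the plugin takes one address (a Producer accepts exactly one); the import path is the library's current one, and the topic and address are placeholders:

package main

import (
	"log"

	nsq "github.com/nsqio/go-nsq"
)

func main() {
	// One producer talks to exactly one nsqd instance.
	p, err := nsq.NewProducer("127.0.0.1:4150", nsq.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer p.Stop()

	// Telegraf would serialize metrics (e.g. line protocol) into the body.
	if err := p.Publish("telegraf", []byte("cpu usage_idle=98.2")); err != nil {
		log.Fatal(err)
	}
}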
Cameron Sparr
c73c28de7e Update CHANGELOG with version 0.2.0 2015-10-27 15:43:47 -06:00
Ellison Marks
9e0ec0927c Making sure telegraf.d directory is created by packages. 2015-10-27 11:32:00 -07:00
Ellison Marks
23e6715a02 Making the field name matching when merging respect the toml struct tag.
If the field has a toml struct tag, don't try fuzzy matching, thanks to
@ekini.
2015-10-27 11:31:43 -07:00
Cameron Sparr
f7eae86cdb Update README to version 0.2.0 2015-10-26 22:05:27 -06:00
Cameron Sparr
889c0a50a4 Fixup random interval jittering 2015-10-26 13:34:31 -06:00
JP
7d15061984 add librato output plugin, update datadog plugin to skip non-number metrics
closes #322
2015-10-26 13:29:53 -06:00
Tait Clarridge
ccbfb038ee Change aerospike default config to localhost
The default config was in a non-runnable state if one were to
attempt to use it with the docker-machine setup. Changed to localhost.

closes #321
2015-10-26 10:57:10 -06:00
palkan
cb951ebd28 Add httpjson readme
closes #275
2015-10-23 18:34:27 -06:00
palkan
d35c78e933 Rename Tags to TagKeys 2015-10-23 18:33:04 -06:00
palkan
e9356c893b [Fix #190] Add httpjson tags support 2015-10-23 18:33:04 -06:00
JP
869483617b add host to metric, replace '_' with '.'
closes #312
2015-10-23 18:25:26 -06:00
palkan
df96958fb8 Use specific mysql version with docker
closes #315
2015-10-23 17:35:49 -06:00
palkan
de7ad9dfbc Replace opentsdb docker image with the official one
closes #314
2015-10-23 17:34:12 -06:00
palkan
bf1cf4557e Update kafka readme; improve integration tests
closes #313
2015-10-23 17:33:23 -06:00
Cameron Sparr
86d20496ea Fix MySQL DSN -> tags parsing
Closes #297
2015-10-22 17:16:19 -06:00
Cameron Sparr
ae7ad2230f Support printing output with usage flag too 2015-10-22 14:24:51 -06:00
Ellison Marks
2007064c47 Fix for tags in the config not being applied to the agent.
fixes #302
closes #308
2015-10-22 13:58:59 -06:00
Cameron Sparr
c8852339c9 Do not fail Connect() in influxdb output when db creation fails
Fixes #304
2015-10-22 11:14:10 -06:00
Cameron Sparr
eb0a19062e When MongoDB freezes or restarts, do not report negative diffs
Fixes #253
2015-10-22 10:55:26 -06:00
Cameron Sparr
2f08577967 Fix output panic for -test flag 2015-10-21 18:32:43 -06:00
Cameron Sparr
891f3af504 Update CHANGELOG & README with aerospike plugin 2015-10-21 18:08:43 -06:00
Tait Clarridge
c5f200917a Add aerospike plugin support
- Does not use the aerospike client library, but sends the stats command
  in the format Aerospike requires
- Queries available namespaces and gets stats for all of them

closes #300
2015-10-21 18:04:45 -06:00
Cameron Sparr
21622a1a17 Update CHANGELOG with new flushing options 2015-10-21 17:37:15 -06:00
Cameron Sparr
a1067fa4ae Normalize collection interval to nearest interval
closes #301
2015-10-21 17:31:27 -06:00
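A minimal Go sketch of one reading of "normalize": sleep until the next even multiple of the interval so collections land on round timestamps (an assumption about the commit's intent):

package main

import (
	"fmt"
	"time"
)

// sleepUntilAligned blocks until the next even multiple of interval,
// e.g. :00, :10, :20 for a 10s interval.
func sleepUntilAligned(interval time.Duration) {
	now := time.Now()
	next := now.Truncate(interval).Add(interval)
	time.Sleep(next.Sub(now))
}

func main() {
	sleepUntilAligned(10 * time.Second)
	fmt.Println(time.Now())
}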
Ellison Marks
4395a46190 Tests for LoadDirectory.
closes #295
2015-10-21 14:07:09 -06:00
gotyaoi
c938523cd5 Implementing LoadDirectory. 2015-10-21 12:00:22 -07:00
gotyaoi
ae10fc7fb4 Fixing old tests and adding new ones for new code. 2015-10-21 12:00:21 -07:00
gotyaoi
0299a17da1 Moving the Duration wrapper to its own package to break import loops. 2015-10-21 12:00:21 -07:00
gotyaoi
d77cfd6ecc Adding testify/suite to godep. 2015-10-21 11:59:20 -07:00
gotyaoi
03d79996de Moving away from passing around *ast.Tables.
Config in the config directory will need to be merged into the main
config, which is difficult to do using the *ast.Tables. Get the config
into structs as soon as possible and then merge the structs.
2015-10-21 11:59:19 -07:00
Eugene Dementiev
553208a960 Combine BatchPoints with the same RoutingTag to one message in amqp output
closes #287
2015-10-21 11:53:08 -06:00
Cameron Sparr
dfc59866e8 Add support for retrying output writes, using independent threads
Fixes #285
2015-10-21 11:17:01 -06:00
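A sketch of per-output flush goroutines with a bounded retry loop; the names and the fixed-backoff policy are assumptions, not the agent's actual implementation:

package main

import (
	"log"
	"time"
)

type Batch []string // stand-in for a batch of serialized points

type Output interface {
	Write(Batch) error
}

// flushLoop runs in its own goroutine per output, so one slow or
// failing output cannot block writes to the others.
func flushLoop(o Output, in <-chan Batch, retries int, backoff time.Duration) {
	for batch := range in {
		var err error
		for i := 0; i <= retries; i++ {
			if err = o.Write(batch); err == nil {
				break
			}
			time.Sleep(backoff)
		}
		if err != nil {
			log.Printf("dropping batch after %d retries: %v", retries, err)
		}
	}
}

func main() {}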
Cameron Sparr
ac685d19f8 Clean up logging messages and add flusher startup delay
Fixes #294
2015-10-20 16:45:31 -06:00
Joseph Dykstra
dd2e9e08df Add periods to the end of sentences
Closes #288
2015-10-20 14:20:30 -06:00
Roman Statsevich
499b5befd6 add bcache plugin
Closes #286
2015-10-20 14:17:09 -06:00
Cameron Sparr
c26ce9c4fe Utilizing new client and overhauling Accumulator interface
Fixes #280
Fixes #281
Fixes #289
2015-10-20 13:53:58 -06:00
Cameron Sparr
6263bc2d1b Godep update: influxdb 2015-10-20 10:17:34 -06:00
Cameron Sparr
f7504fb5eb InfluxDB does not accept uint64, so cast them down to int64
Fixes #290
2015-10-19 18:53:40 -06:00
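The cast itself is one line; this sketch adds a clamp, since a bare int64(v) wraps to a negative number for values above math.MaxInt64 (the clamp is an assumption about sensible behavior, not necessarily what the commit does):

package main

import (
	"fmt"
	"math"
)

func toInt64(v uint64) int64 {
	if v > math.MaxInt64 {
		return math.MaxInt64 // avoid wrapping to a negative value
	}
	return int64(v)
}

func main() {
	fmt.Println(toInt64(42), toInt64(math.MaxUint64))
}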
Tyler Nisonoff
6869362f43 added keyspace hitrate measurement
Closes #283
2015-10-18 18:00:34 -06:00
Tyler Nisonoff
7600cc87d8 added connections measurement with user tag
Closes #284
2015-10-18 17:43:36 -06:00
Jonathan Cross
3192c78d96 fixed test to check actual value
Closes #273

caught a typo :D using it
2015-10-18 17:39:53 -06:00
Jonathan Cross
c3dad00c1b PuppetAgent Plugin
Added PuppetAgent plugin, which reads the last_run_summary file
2015-10-18 17:37:11 -06:00
Cameron Sparr
a1bad378d2 Turn off GOGC for faster build time in CI 2015-10-18 15:56:47 -06:00
Cameron Sparr
73f1ed4f25 Use Unix() int64 time for comparing timestamps in kafka consumer 2015-10-16 16:58:52 -06:00
Cameron Sparr
b28b4bd71e Fix ApplyTemplate change in graphite parser 2015-10-16 16:43:31 -06:00
Cameron Sparr
b15928c95e godep update: influxdb 2015-10-16 16:26:58 -06:00
Cameron Sparr
97d4f9e0ff Run go fmt in CI 2015-10-16 13:08:32 -06:00
Cameron Sparr
0986caf0ad Fix Go vet issue, test accumulator should be passed by reference with lock
Closes #276
2015-10-16 11:21:44 -06:00
palkan
9cccf8f88a Add locking to test accumulator 2015-10-16 10:58:46 -06:00
Joseph Dykstra
3ae5b4b280 Fix typos
Closes #270
2015-10-16 10:54:51 -06:00
Oskar Risberg
62b0e25b84 Add phpfpm to readme
Closes #274
2015-10-16 10:53:36 -06:00
Cameron Sparr
4e5ed9d3b9 Change config file indentation to 2 spaces 2015-10-15 15:53:29 -06:00
Sean Reifschneider
555436a222 Fix for init script for other procs with "telegraf"
The init script fails if another process has the word "telegraf" in
it, for example if you are running "vi /etc/opt/telegraf/telegraf.conf"
or "tail -f /var/log/telegraf/telegraf.log". This is because
the "-f" flag to "pgrep" will show processes with the search
string anywhere in the command line.

This patch turns it around and gets the "ps" output for the process
in the pidfile, and if that line has "telegraf" in it, it considers
it to be running.

Closes #266
Closes #267
2015-10-15 15:06:05 -06:00
Cameron Sparr
6977119f1e Statsd plugin, tags and timings
Closes #237
Closes #39
2015-10-15 12:07:36 -06:00
Cameron Sparr
52be516fa3 wget and install go1.5.1 on machine 2015-10-14 17:54:00 -06:00
Cameron Sparr
2dd3eee58e Use graphite parser for templating, godep update to head 2015-10-14 17:54:00 -06:00
Cameron Sparr
d40351286a Refactoring gauges to support floats, unit tests 2015-10-14 17:54:00 -06:00
Cameron Sparr
d84a258b0a Statsd: unit tests for gauges, sets, counters 2015-10-14 17:54:00 -06:00
Cameron Sparr
eb2a4dc724 Statsd listener plugin
implement gauges, sets, counters
2015-10-14 17:54:00 -06:00
Cameron Sparr
316fa1cc01 Add recently-added plugins to list 2015-10-14 17:53:47 -06:00
Sean Reifschneider
04e2db1f41 Issue #264: Fixes for logrotate config file.
This adds copytruncate and dateext to match the Influxdb logrotate file,
and removes nocreate (again, to match influxdb).

Closes #264
Closes #265
2015-10-14 17:51:37 -06:00
Jonathan Cross
2f7d781635 remove zookeeper declaration
since spotify/kafka docker image already exposes zookeeper

Closes #262
2015-10-14 17:49:23 -06:00
Jonathan Cross
88ff269370 added measurement prefix 2015-10-14 17:48:21 -06:00
Jonathan Cross
7121e1a3b0 fixes based on comments 2015-10-14 17:48:21 -06:00
Jonathan Cross
8fd06b96d7 Zookeeper plugin
Created a zookeeper plugin that fetches from the 'mntr' command and
outputs measurements that are int- and string-based
2015-10-14 17:48:21 -06:00
Cameron Sparr
181c3cdc28 Update CHANGELOG with recent bugfixes
Closes #261
2015-10-13 17:58:17 -06:00
Eugene Dementiev
ccfa913186 Fix crash if login/password is incorrect in rabbitmq plugin. Closes #260
Closes #260
2015-10-13 17:54:29 -06:00
Eugene Dementiev
2a9f31bfea Add sample for exec plugin. Fixes #245
Closes #258
2015-10-13 17:53:18 -06:00
Vinh
0bc76f094a Add PHPFPM stat
- HTTP status or socket status
- Collect these metrics:
    accepted conn, listen queue, max listen queue, listen queue len,
    idle processes, active processes, total processes,
    max active processes, max children reached, slow requests
- Tag metrics with: `host` and `pool` name

Closes #255
2015-10-12 15:40:42 -06:00
Shirou WAKAYAMA
d394003739 add UDP socket counts and rename to 'netstat'.
Closes #244
2015-10-12 00:08:35 -06:00
Shirou WAKAYAMA
17dd058308 add README about TCP Connection plugin. 2015-10-12 00:05:10 -06:00
Shirou WAKAYAMA
99b1a3071d add NetConnections to the mockPS. 2015-10-12 00:05:10 -06:00
Shirou WAKAYAMA
dc38e448bd add tcp connections stat plugin. 2015-10-12 00:05:10 -06:00
Michael Bushey
1d1180ec0c telegraf-agent.toml: Fix example port and use complete examples for mysql plugin 2015-10-09 15:52:46 -07:00
Cameron Sparr
81539c4ed6 Merge pull request #252 from aristanetworks/master
Added Mountpoints option to system/disk plugin to report stats for selected mountpoints
2015-10-09 14:23:18 -06:00
subhachandrachandra
cf1dcfe37c Dropped SkipInodeUsage option as "drop" achieves the same results.
Fixed a bug in restricting Disk reporting to specific mountpoints.
Added tests for the Disk.Mountpoints option.
Fixed minor bug in usage of assert for the cpu tests where expected and actual values were swapped.
2015-10-08 14:17:04 -07:00
Cameron Sparr
7293376973 Race condition fix: copy BatchPoints into goroutine
Fixes #250
2015-10-08 14:27:22 -06:00
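The classic shape of this fix, sketched below: pass the loop variable as an argument so each goroutine gets its own copy instead of racing on one shared variable (pre-Go-1.22 loop semantics, as in this era):

package main

import (
	"fmt"
	"sync"
)

type BatchPoints struct{ Name string }

func main() {
	batches := []BatchPoints{{"a"}, {"b"}, {"c"}}
	var wg sync.WaitGroup
	for _, bp := range batches {
		wg.Add(1)
		// Passing bp as an argument copies it; closing over the loop
		// variable directly would let every goroutine observe whatever
		// value bp holds when it finally runs.
		go func(bp BatchPoints) {
			defer wg.Done()
			fmt.Println("writing", bp.Name)
		}(bp)
	}
	wg.Wait()
}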
Cameron Sparr
d9f1a60a64 godep update: gopsutil 2015-10-07 16:12:55 -06:00
subhachandrachandra
4f6526e1a5 Merge remote-tracking branch 'upstream/master' 2015-10-07 14:49:47 -07:00
subhachandrachandra
e6ea09f482 Added Mountpoints and SkipInodeUsage options to the Disk plugin to control
which mountpoints stats get reported for, and to skip inode stats. 2015-10-07 14:42:11 -07:00
2015-10-07 14:42:11 -07:00
Cameron Sparr
d620651ef6 procstat plugin, consolidate PID-getting 2015-10-07 14:13:33 -06:00
Cameron Sparr
9221f93be9 Allow procstat plugin to handle multiple PIDs from pgrep
Closes #248
2015-10-07 13:48:55 -06:00
Cameron Sparr
795ea49093 Add pid tag to procstat plugin, don't exit on error, only log 2015-10-07 11:42:50 -06:00
Ranjib Dey
6827459b9f fix typo in sample config and README
Closes #240
2015-10-07 11:19:55 -06:00
Ranjib Dey
e424d47ce6 fix plugin registration name 2015-10-07 11:11:47 -06:00
Ranjib Dey
ca0e732331 fix toml struct string 2015-10-07 11:11:47 -06:00
Ranjib Dey
8e52905ea9 add readme for procstat plugin 2015-10-07 11:11:12 -06:00
Cameron Sparr
5cc26bb640 godep update for procstat 2015-10-07 11:11:12 -06:00
Ranjib Dey
fdf00c1be6 Monitor process by pidfile or exe name 2015-10-07 11:11:12 -06:00
Cameron Sparr
47258a7093 Godep update: gopsutil 2015-10-07 10:41:44 -06:00
cornerot
5112d077d5 add tabs in the apache sampleConfig var
Closes #246
2015-10-06 10:34:03 -06:00
Cameron Sparr
b4e8a23da4 godep update: gopsutil 2015-10-05 11:10:24 -06:00
Shirou WAKAYAMA
63e9a4ae68 Fix godeps for MQTT output and remove hostname setting
Closes #241
2015-10-05 10:56:43 -06:00
Shirou WAKAYAMA
7e96a9afda Change MQTT output topic format to split plugin name. 2015-10-05 10:43:46 +09:00
Shirou WAKAYAMA
f5a225f1e0 update Godep.json 2015-10-04 23:57:40 +09:00
Shirou WAKAYAMA
6f4a3816a5 Add MQTT output. 2015-10-04 22:52:29 +09:00
subhachandrachandra
29363794c1 Merge remote-tracking branch 'upstream/master' 2015-09-30 17:02:58 -07:00
Cameron Sparr
64a3a718e6 CHANGELOG feature updates 2015-09-29 14:15:23 -07:00
Cameron Sparr
b01c28ebc6 Clean up additional logging and always print basic agent config 2015-09-29 14:06:49 -07:00
Cameron Sparr
f5d1aaf7d9 Memory plugin: re-add cached and buffered to memory plugin 2015-09-28 17:05:42 -07:00
Cameron Sparr
f6f45881da Add more logging to telegraf 2015-09-28 16:57:03 -07:00
Nick Jones
cd7468f3be Fix conditional test against useradd so it's compatible with Dash
The test to see which version of `useradd` is installed uses 'bashisms'
that fail on Ubuntu due to the fact that `/bin/sh` is symlinked to Dash,
causing the telegraf account to be created without the `--system` option
ever being passed.

This change amends the syntax so that it's POSIX-compatible and more
portable as a result.
2015-09-28 14:04:46 +01:00
subhachandrachandra
cd93b9ae0b Merge remote-tracking branch 'upstream/master'
Conflicts:
	Godeps/_workspace/src/github.com/shirou/gopsutil/mem/mem_darwin.go
	Godeps/_workspace/src/github.com/shirou/gopsutil/mem/mem_linux.go
2015-09-25 15:19:11 -07:00
Eugene Dementiev
0ffaafd788 Fix packages provides: now new version of package replaces the old one 2015-09-24 13:29:37 -07:00
Eugene Dementiev
c6283d1b5a AMQP auto reconnect feature
Closes #207
2015-09-24 10:30:00 -07:00
Josh Palay
24527859e6 Fix printf format issue
Closes #227
2015-09-23 15:44:25 -07:00
Josh Palay
0c6c5718fe Adds command intervals to exec plugin 2015-09-23 15:42:48 -07:00
Ruslan Islamgaliev
c4bbc18cb6 Make nginx_test check port in nginx module tags
Closes #223
2015-09-23 15:41:46 -07:00
Ruslan Islamgaliev
6e76759225 Add port tag to nginx plugin 2015-09-23 15:40:43 -07:00
Cameron Sparr
87ed2d4a21 Update CHANGELOG with ekini's changes and docker plugin
Closes #226
2015-09-23 15:37:21 -07:00
Eugene Dementiev
74b3309225 Add timestamps to points in Kafka/AMQP outputs 2015-09-23 15:31:37 -07:00
Cameron Sparr
1d741cbfc5 Update godep of go-dockerclient for Label access 2015-09-23 14:25:26 -07:00
Cameron Sparr
12420db4b9 docker plugin: Add docker labels as tags
Closes #90
2015-09-23 14:20:15 -07:00
Cameron Sparr
aad6a7e262 Only run the cpu plugin twice when using -test
This can be easily extended to other plugins that need it by tacking
their name onto the switch statement. Also eliminating the unused
TestAllPlugins code and cleaning up some stray Printlns
2015-09-23 13:56:14 -07:00
Cameron Sparr
b12b804f0a Make redis password config clearer.
Also make certain that the 'host' tag does not include the password for
security reasons

Closes #225
2015-09-23 13:28:36 -07:00
Cameron Sparr
64d38ed17e Remove duplicate opentsdb docker images 2015-09-22 21:10:21 -06:00
Cameron Sparr
f8d64a7378 Redis: include per-db keyspace info
Closes #205
2015-09-22 19:46:50 -07:00
Cameron Sparr
b92a0d5126 Redis plugin, add key metrics and simplify parsing 2015-09-22 16:27:22 -07:00
Cameron Sparr
e0372358df Update changelog with info about filtering 2015-09-22 16:09:24 -07:00
Cameron Sparr
1bce6e3faf Updating README and CHANGELOG for 0.1.9 2015-09-22 13:43:37 -07:00
Cameron Sparr
81dd281789 Remove gvm from packaging script 2015-09-22 11:46:51 -07:00
Cameron Sparr
f7b38dc270 Update deb/rpm package config, package script 2015-09-22 10:56:01 -07:00
Cameron Sparr
ec9819071a Add -outputfilter flag, and refactor the filter flag to work for -sample-config
Closes #211
Issue #199
2015-09-22 10:56:01 -07:00
Ruslan Islamgaliev
72edc3c4fe Select default apache port depending on url scheme 2015-09-22 10:53:53 -07:00
Ruslan Islamgaliev
5657e8d1da Add port tag to apache plugin 2015-09-22 10:52:45 -07:00
Cameron Sparr
0700e0cf94 Update gopsutil godep dependency
Closes #219
2015-09-22 09:28:37 -07:00
Cameron Sparr
1cd2db9f8c Memory plugin: use 'available' instead of 'actual_'
Closes #214
2015-09-21 17:39:39 -07:00
Cameron Sparr
10d411c4f7 Update new memory unit tests, documentation 2015-09-21 17:22:24 -07:00
Cameron Sparr
167b8b8eb8 Godep update gopsutil to get darwin mem fix 2015-09-21 17:22:24 -07:00
Cameron Sparr
74da03d9fa Refactor memory stats, remove some, add 'actual_' stats 2015-09-21 17:22:23 -07:00
Cameron Sparr
b8a58dad65 Fix CPU unit tests for time_ prefix 2015-09-21 17:08:50 -07:00
Cameron Sparr
b012713cf2 Adding time_ prefix to all CPU time measurements 2015-09-21 10:23:46 -07:00
Cameron Sparr
82d914149e Adding a retry to the initial telegraf database connection
Fixes #187
2015-09-18 18:03:47 -07:00
Eugene Dementiev
450f5e03a5 Add shebang to postinstall script (fixes installation on Debian family)
Closes #212
2015-09-18 15:07:11 -07:00
Eugene Dementiev
b04706b875 Fix makefile warning for go1.5 2015-09-18 21:16:08 +03:00
Cameron Sparr
10b0438201 Remove cpu_usage_busy, this is simply 100-cpu_usage_idle 2015-09-17 17:46:35 -07:00
Cameron Sparr
0270ace3d4 Add a CPU collection plugin README 2015-09-17 17:46:34 -07:00
Cameron Sparr
bbb27fa484 Update gopsutil dependency to enable 32-bit builds 2015-09-17 17:46:34 -07:00
Cameron Sparr
df15e7b379 Remove non-existent 'stolen' cpu stat, fix measurement names 2015-09-17 17:46:34 -07:00
Cameron Sparr
df651ab98e Properly vendor the gopsutil dependency 2015-09-17 17:46:34 -07:00
Cameron Sparr
dd7a3b37b0 Delete 'vendored' gopsutil directory 2015-09-17 17:46:34 -07:00
Tim Allen
94a623c00e Check if file exists before running disk usage on it. Not all mounts are normal files.
Closes #208
2015-09-17 17:45:58 -07:00
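A minimal sketch of the guard, assuming it amounts to stat-ing the mount point and skipping entries that are not reachable paths:

package main

import (
	"fmt"
	"os"
)

// usableMount reports whether a mount-table entry is a reachable path;
// special or stale mounts fail the stat and are skipped.
func usableMount(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}

func main() {
	fmt.Println(usableMount("/"), usableMount("/definitely-not-a-mount"))
}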
Cameron Sparr
6cb0f2d392 Revert godep updates, needs a fix in influxdb repo 2015-09-17 00:57:39 -07:00
Cameron Sparr
17e165382f Add amqp/rabbitmq to output list in readme 2015-09-16 17:30:10 -07:00
Cameron Sparr
733ba07312 Changing AddValues to AddFields and temporarily disabling adding with time
Currently adding with time is broken, because InfluxDB does not support
using precision for timestamp truncation both with and without
timestamps. This will be re-enabled once we fix InfluxDB to use the
precision argument for truncation in all cases and to accept a "unit"
argument in the line protocol for adding points with non-nanosecond
timestamps.

Fixes #175
2015-09-16 16:59:48 -07:00
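A sketch of the renamed accumulator call; the signature is an assumption modeled on later telegraf releases, and the timestamp argument is omitted because this commit temporarily disabled adding points with a time:

package main

import "fmt"

// Accumulator is the assumed shape after the AddValues -> AddFields rename.
type Accumulator interface {
	AddFields(measurement string, fields map[string]interface{}, tags map[string]string)
}

type printAcc struct{}

func (printAcc) AddFields(m string, f map[string]interface{}, t map[string]string) {
	fmt.Println(m, f, t)
}

func main() {
	var acc Accumulator = printAcc{}
	acc.AddFields("mem",
		map[string]interface{}{"used": 1024, "cached": 256},
		map[string]string{"host": "example"})
}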
Cameron Sparr
46cd9ff9f5 Update influxdb godeps for line-protocol precision fix 2015-09-16 16:59:48 -07:00
Cameron Sparr
66ed4f7328 mysql plugin: don't emit blank tags
closes #201
2015-09-16 14:24:38 -07:00
Cameron Sparr
3be6d84675 Catching up on some CHANGELOG updates 2015-09-16 14:23:57 -07:00
Eugene Dementiev
406e980fae install and init script for el5
Fixes #186
Closes #203
2015-09-16 14:19:57 -07:00
Oliver Buschjost
211065565f Add HTTP 5xx stats to HAProxy plugin. Closes #194 2015-09-16 14:10:09 -07:00
Cameron Sparr
d979ee5573 AMQP routing tag doc & add routing tag for Kafka
closes #200
2015-09-16 12:10:26 -07:00
Roman Plessl
c843b53c30 added docker image unit test with OpenTSDB 2015-09-16 11:01:17 -07:00
Eugene Dementiev
5d280e4d25 AMQP output plugin typo fixes and added README and RoutingTag 2015-09-16 10:59:29 -07:00
Eugene Dementiev
f00d43aa09 Added amqp output 2015-09-16 10:58:38 -07:00
Cameron Sparr
2e68d3cb3c Merge pull request #198 from mced/fix_mem_used_perc
[fix] mem_used_perc returns percentage of used mem
2015-09-15 15:24:48 -07:00
Cédric Menassa
4d6f11b61f [fix] mem_used_perc returns percentage of used mem 2015-09-15 12:58:51 +02:00
Kevin Bouwkamp
aac9ba6c1e add bugfix in CHANGELOG and some notes in pg README
Closes #192
2015-09-14 18:48:01 -07:00
Kevin Bouwkamp
d926a3b5da no longer duplicate ignored columns here 2015-09-14 18:47:07 -07:00
Kevin Bouwkamp
fa5753c579 Makes the test also work across pg versions 2015-09-14 18:47:07 -07:00
Kevin Bouwkamp
3fa3b2d836 add some comments 2015-09-14 18:47:07 -07:00
Kevin Bouwkamp
76041e84e8 fix some more indentation... 2015-09-14 18:47:07 -07:00
Kevin Bouwkamp
19c6572926 Add a few notes about the connection strings 2015-09-14 18:47:07 -07:00
Kevin Bouwkamp
2217fb8c58 uncomment to skip test in short mode 2015-09-14 18:47:07 -07:00
Kevin Bouwkamp
50fcb3914d Generating metric information dynamically. Makes compatible with postgresql versions < 9.2 2015-09-14 18:47:06 -07:00
Roman Plessl
9a0c0886ce added more UNIT test cases for covering all parts of the code
added debug statement for debugging OpenTSDB communication

Closes #182
2015-09-14 18:43:56 -07:00
Roman Plessl
fc41cc9878 added prefix settings of the module and rearrange go test code 2015-09-14 18:41:43 -07:00
Roman Plessl
08b220a1fb added docker image unit test with OpenTSDB 2015-09-14 18:41:43 -07:00
Roman Plessl
7e3beaf822 fix spaces with gofmt 2015-09-14 18:41:43 -07:00
Roman Plessl
d2150efc19 added readme as suggested/wished in #177 2015-09-14 18:41:43 -07:00
Roman Plessl
380146b75b added opentsdb as sink 2015-09-14 18:41:43 -07:00
Roman Plessl
2bf096cfc7 adds opentsdb telnet output plugin 2015-09-14 18:41:42 -07:00
Roman Plessl
cb887dee81 change/fix expected test result 2015-09-14 18:41:42 -07:00
Roman Plessl
2ee7d5eeb6 code improvements after running tests / compile step 2015-09-14 18:41:42 -07:00
mced
6d6158ff08 [fix] mem_used_perc returns percentage of used mem
Closes #189
2015-09-14 12:18:31 -07:00
Cameron Sparr
11126cf4ae Add a server name tag to the RabbitMQ server list
Fixes #183
2015-09-11 16:46:49 -07:00
Ruslan Islamgaliev
bd00f46d8b Fix docker stats to make it work on centos 7.
issue #58
issue #84
2015-09-11 16:26:08 -07:00
Cameron Sparr
d8482cc286 darwin net plugin fix, really need to godep vendor gopsutil 2015-09-10 13:57:57 -06:00
Cameron Sparr
f7a4317990 Fix multiple redis server bug, do not cache the TCP connections
Fixes #178
2015-09-10 11:51:15 -06:00
Vye Wilson
a55f6498c8 Makefile will now honor GOBIN, if set
Closes #181
2015-09-10 11:50:11 -06:00
Cameron Sparr
81f4aa9a5d Fix bug in setting the precision before gathering metrics
Closes #175
2015-09-09 21:29:55 -06:00
Cameron Sparr
3c7c8926fb Support InfluxDB clusters
Closes #143
2015-09-09 17:57:17 -06:00
Cameron Sparr
a7ed46160a Re-arrange repo files for root dir cleanup 2015-09-09 12:38:51 -06:00
Cameron Sparr
a9b97c7a2b Bump go version number to 1.5 2015-09-09 12:07:58 -06:00
Cameron Sparr
0780ad4ad9 README updates for systemd and deb/rpm install 2015-09-09 12:04:59 -06:00
Cameron Sparr
bf9992b613 Update telegraf.service and packaging script for systemd
Deals with most of #170
2015-09-08 18:23:18 -06:00
Cameron Sparr
8c5e1ff0a0 Update README plugins list 2015-09-04 17:05:50 -06:00
Cameron Sparr
b3044a6e2b Put all ARCH binaries on the README 2015-09-04 16:37:07 -06:00
Cameron Sparr
6260dd1018 Makefile rule for building all linux binaries, and upload all ARCHs 2015-09-04 14:12:50 -06:00
Cameron Sparr
e47801074e package.sh script fixes for uploading binaries 2015-09-04 13:19:13 -06:00
Cameron Sparr
6d42973d7c Update package script and readme for 0.1.8 2015-09-04 12:53:29 -06:00
Cameron Sparr
68e41f130c Ping plugin
Closes #167
2015-09-04 11:20:49 -06:00
Cameron Sparr
65b33a848e Fix default installed config for consistency 2015-09-02 14:25:40 -06:00
Cameron Sparr
5bfb6df0e0 Write data in UTC by default and use 's' precision
Closes #159
Closes #162
2015-09-02 14:19:36 -06:00
Cameron Sparr
13061d1ec7 package.sh: upload raw binaries to S3
Closes #166
2015-09-02 12:05:29 -06:00
nickscript0
0143a4227e add additional metrics to mysql plugin tests
Closes #165
2015-09-02 11:49:16 -06:00
nickscript0
3f63bcde12 add additional MySQL metrics 2015-09-02 11:48:38 -06:00
Michael Wood
b86c6bba4e README: say where tagpass/tagdrop are valid.
closes #163
2015-09-02 09:33:05 -06:00
Cameron Sparr
4d19fc0860 Fixup for g->r change, io.reader was already using 'r' 2015-08-31 16:15:38 -06:00
Cameron Sparr
9c57c30e57 Redis plugin internal names consistency fix, g -> r 2015-08-31 15:57:52 -06:00
Cameron Sparr
9969c4e810 Add system uptime metric, both string-formatted and as float64
closes #150
2015-08-31 14:43:34 -06:00
Alexander Oleinik
e2bc5d80c9 Apache Plugin
Closes #158
Fixes #132
2015-08-31 10:17:18 -06:00
Michael Desa
ab191e2b58 Rename DEPENDENCY_LICENSES LICENSE_OF_DEPENDENCIES
Closes #155
Closes #154
2015-08-28 19:37:23 -06:00
Michael Desa
d418a6e872 Add list of dependency licenses 2015-08-28 16:17:46 -07:00
Cameron Sparr
bdfd1aef62 Update README with 0.1.7 and make separate CONTRIBUTING file 2015-08-28 10:21:22 -06:00
Cameron Sparr
ff2de0c715 Only build the docker plugin on linux 2015-08-27 17:09:18 -06:00
Cameron Sparr
5b78b1e548 Clean up agent error handling and logging of outputs/plugins
Closes #145
2015-08-27 13:41:19 -06:00
Cameron Sparr
d1f965ae30 Kafka output producer, send telegraf metrics to Kafka brokers
Closes #38
2015-08-26 17:03:58 -06:00
Cameron Sparr
434267898b Indent the toml config for readability 2015-08-26 09:22:03 -06:00
Cameron Sparr
a00510a73c Outputs enhancement to require Description and SampleConfig functions
Closes #142
2015-08-26 07:34:26 -06:00
Cameron Sparr
846fd31121 Improve build from source instructions
Closes #141
2015-08-25 18:18:56 -06:00
Cameron Sparr
ab4344a781 Merge problem, re-enable non-standard DB names 2015-08-25 16:52:16 -06:00
Cameron Sparr
ac97fefb91 makefile: ADVERTISED_HOST needs only be set during docker-compose target 2015-08-25 16:34:30 -06:00
subhachandrachandra
8d034f544c Fixed memory reporting for Linux systems
/proc/meminfo reports memory in binary kilobytes (KiB) and so needs a multiplier of 1024 instead of 1000.
The kernel reports in terms of pages, and the proc filesystem left-shifts by 2 for 4KB pages to get kB. Since this is a binary shift, converting to bytes needs a shift by 10, i.e. multiplying by 1024.

From the kernel code (PAGE_SHIFT = 12 for 4KB pages):
"MemTotal:       %8lu kB\n", K(i.totalram)

Closes #131
2015-08-25 14:18:14 -06:00
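The arithmetic in one line: meminfo's "kB" is a binary kilobyte, so bytes = value << 10.

package main

import "fmt"

func main() {
	// e.g. "MemTotal: 16303428 kB" from /proc/meminfo
	memTotalKB := uint64(16303428)
	fmt.Println(memTotalKB << 10) // bytes: multiply by 1024, not 1000
}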
subhachandrachandra
ca1d2c7000 Fixed total memory reporting for Darwin systems. hw.memsize is reported as bytes instead of pages. 2015-08-25 14:16:18 -06:00
Bruno Bigras
0acf15c025 Typo: prec -> perc
Closes #140
2015-08-25 14:15:12 -06:00
Cameron Sparr
94eed9b43c Add MySQL server address tag to all measurements
Closes #138
2015-08-25 13:58:55 -06:00
Bruno Bigras
8a6665c03f memcached: fix when a value contains a space
Fixes #137
Closes #139
2015-08-25 13:14:40 -06:00
Cameron Sparr
85ae6fffbb Vagrantfile: do a one-way rsync so that binaries don't get shared between VMs and host 2015-08-25 11:54:12 -06:00
Cameron Sparr
bd85a36cb1 Fixes #130, document mysql plugin better, README 2015-08-24 15:08:16 -06:00
Cameron Sparr
a449e4b47c Add #136 to CHANGELOG 2015-08-24 14:56:50 -06:00
Cameron Sparr
42602a3f35 Provide a -usage flag for printing the usage of a single plugin
Closes #136
2015-08-24 14:52:46 -06:00
Cameron Sparr
50f902cb02 Fixes #128, add system load and swap back to default Telegraf config 2015-08-24 13:26:21 -06:00
nickscript0
b014ac12ee Update CHANGELOG.md 2015-08-24 13:09:23 -06:00
nickscript0
610f24e0cd Update CHANGELOG.md 2015-08-24 13:09:23 -06:00
nsvarich
f45f7e56fd add plugin.name to error message 2015-08-24 13:09:23 -06:00
nickscript0
afe366d6b7 go fmt remove whitespace 2015-08-24 13:09:23 -06:00
nickscript0
1daa059ef9 Log plugin errors in crankParallel and crankSeparate cases. Previously errors weren't logged in these cases. 2015-08-24 13:09:23 -06:00
Cameron Sparr
9777aa6165 Update README to point to url without 'v' prepended to version 2015-08-24 10:48:21 -06:00
Cameron Sparr
143ec1a019 Filter out the 'v' from the version tag, issue #134 2015-08-24 10:39:15 -06:00
subhachandrachandra
13ee9ff37b Fixed memory reporting for Linux systems
/proc/meminfo reports memory in binary kilobytes (KiB) and so needs a multiplier of 1024 instead of 1000.
The kernel reports in terms of pages, and the proc filesystem left-shifts by 2 for 4KB pages to get kB. Since this is a binary shift, converting to bytes needs a shift by 10, i.e. multiplying by 1024.

From the kernel code (PAGE_SHIFT = 12 for 4KB pages):
"MemTotal:       %8lu kB\n", K(i.totalram)
2015-08-21 16:08:54 -07:00
subhachandrachandra
a3c846b73e Fixed total memory reporting for Darwin systems. hw.memsize is reported as bytes instead of pages. 2015-08-21 15:15:19 -07:00
Cameron Sparr
3d05575e9d Fix for #129 README typo in the 0.1.6 package name url 2015-08-21 10:43:19 -06:00
Cameron Sparr
9d00b5e165 Version= doesn't work on go1.4.2;
fixing makefile & vagrantfile & build script to reflect that
2015-08-20 16:43:25 -06:00
Cameron Sparr
a29b39e17a README typo fix 2015-08-20 15:18:45 -06:00
Cameron Sparr
8273679634 0.1.6, update changelog, readme, plugins list 2015-08-20 14:45:04 -06:00
Cameron Sparr
f8c1e953d4 godep update influxdb to 0.9.3-rc1 2015-08-20 14:26:44 -06:00
Cameron Sparr
532d953b5a fix for #126, nginx plugin not catching net.SplitHostPort error 2015-08-20 11:26:49 -06:00
Cameron Sparr
ecfdafab06 Add a simple integration test at the end of circle-test.sh similar to homebrew test 2015-08-20 10:07:17 -06:00
Cameron Sparr
9bc39987f1 Change -X main.Version <n> to -X main.Version=<n> for go1.5 2015-08-20 08:46:08 -06:00
Simon Fraser
601b444a60 fix segv on error 2015-08-20 07:49:46 -06:00
Cameron Sparr
4b0671205d packaging script fix, make_dir_tree is req'd 2015-08-19 15:02:55 -06:00
Cameron Sparr
db634f4c0b Fix for issue #121, update etc/config.sample.toml 2015-08-19 14:40:35 -06:00
Josh Palay
7d9efd7cff Modifications to httpjson plugin 2015-08-19 13:25:21 -06:00
Alvaro Morales
06ef2a72c5 Add httpjson plugin 2015-08-19 13:24:07 -06:00
Cameron Sparr
5e8b6dd164 Update CHANGELOG with some recent additions 2015-08-18 15:58:51 -06:00
Cameron Sparr
03c7d564d9 Merge pull request #118 from srfraser/diskusage_windows_fix
Get disk usage stats working on windows
2015-08-18 15:58:13 -06:00
Cameron Sparr
c3ec3f4bc8 Fix issue #119, remove the _workspace/pkg directory from git tracking 2015-08-18 15:51:40 -06:00
Simon Fraser
7273e2e6f2 Get disk usage stats working on windows 2015-08-18 21:31:33 +01:00
Cameron Sparr
af770e042a Update README to reflect new release of 0.1.4 & 0.1.5 2015-08-18 12:50:07 -06:00
Cameron Sparr
07a1bffc60 Updating the packaging script to assume tag has already been set 2015-08-18 12:50:07 -06:00
Cameron Sparr
183e79398d Fix build, testify got removed from godeps somehow 2015-08-18 12:49:25 -06:00
Cameron Sparr
d98bedd6e1 Telegraf 0.1.5, update InfluxDB client to HEAD 2015-08-18 12:12:09 -06:00
Cameron Sparr
461245c83d Telegraf 0.1.4, update godep to point to InfluxDB client 0.9.2 2015-08-18 12:09:52 -06:00
Cameron Sparr
6fcbb7bdb0 Update Makefile with new build requirements 2015-08-18 10:36:13 -06:00
Cameron Sparr
2304d03b40 Add build function to circle-test.sh, and remove release.sh 2015-08-18 09:24:49 -06:00
Cameron Sparr
4e3213f3bd godep: vendor all dependencies & add circle-test.sh
Vendor dependencies and use circle-test.sh to run CI process, because
the CircleCI autobuild operations are not compatible with using godep.
2015-08-17 17:38:44 -06:00
Josh Palay
55fb249f6b exec plugin doesn't crash when given null JSON values 2015-08-14 17:06:14 -06:00
Cameron Sparr
4d614b3088 README update to address issue #113 2015-08-14 15:40:29 -06:00
Cameron Sparr
cad0a762a0 Merge branch 'jipperinbham-datadog-output' 2015-08-14 09:45:12 -06:00
JP
0ae5075cc9 fix tests, remove debug prints 2015-08-13 20:42:57 -05:00
JP
3145a732f2 fix merge conflicts, update import paths 2015-08-13 18:57:05 -05:00
JP
ceaf6fd67a add datadog output 2015-08-13 18:54:09 -05:00
Cameron Sparr
c26fa33094 Release 0.1.5, updating CHANGELOG and README 2015-08-13 15:46:17 -06:00
Cameron Sparr
b199d7a9fe Put quotes around potentially empty bash variables 2015-08-13 15:38:05 -06:00
Cameron Sparr
0e65d8e64e Rebase and fixups for PR #111, fixes issue #33 2015-08-13 14:47:51 -06:00
Josh Palay
1e742aec04 Adds cpu busy time and percentages 2015-08-13 14:30:32 -06:00
Cameron Sparr
ba1e4917d1 Removing DefaultConfig function because there's really no point 2015-08-13 14:26:02 -06:00
Cameron Sparr
4ce61875a4 README updates for readability and ease of use 2015-08-13 14:01:08 -06:00
Cameron Sparr
04963f12a3 Allow a PerCPU configuration variable, issue #108 2015-08-13 13:30:11 -06:00
Cameron Sparr
5d4b6c41a8 circle.yml: verify that golint violations == 0 for some dirs 2015-08-12 15:51:43 -06:00
Cameron Sparr
5cb3a096c1 Fix influx.toml and ListTags string printing 2015-08-12 14:59:48 -06:00
JP
ddf438dac0 add missing import and Tag marshalling 2015-08-12 15:17:50 -05:00
Cameron Sparr
ed13924c5a Merge pull request #109 from influxdb/pr-107
Merge of PR #107, Allow Telegraf to output data to multiple locations beyond InfluxDB, such as Riemann or Kafka
2015-08-12 11:21:35 -06:00
Cameron Sparr
5cc6f88ade Update changelog with PR #107, thanks @jipperinbham 2015-08-12 11:08:45 -06:00
Cameron Sparr
32124a7913 Adding a Close() function to the Output interface and to the agent 2015-08-12 11:04:25 -06:00
Cameron Sparr
08042089f9 Followup to issue #77, create configured database name from toml file 2015-08-12 10:54:13 -06:00
JP
53969ae054 move tags to influxdb struct, update all sample configs 2015-08-12 10:23:00 -06:00
Cameron Sparr
16c424de2a Print version number on startup, issue #104 2015-08-11 14:23:16 -06:00
Cameron Sparr
9e2f8f664b Followup to issue #77, create configured database name from toml file 2015-08-11 14:02:04 -06:00
Cameron Sparr
343d8f87b4 Update CHANGELOG with fix for issue #101
I really need to remember to do this with the initial commit.
2015-08-11 11:28:36 -06:00
Cameron Sparr
374a0af084 Fix for issue #101, switch back from master branch if building locally 2015-08-11 11:07:39 -06:00
Cameron Sparr
9f2e6d6172 Update CHANGELOG with PR #106 2015-08-11 11:04:50 -06:00
Cameron Sparr
af647990ab Merge pull request #106 from zepouet/master
New option to filter the plugins to run at startup
2015-08-11 11:02:26 -06:00
nicolas
9b2b1df7e2 Go FMT missing
Merge branch 'master' of https://github.com/zepouet/telegraf
2015-08-11 19:01:51 +02:00
nicolas
abdef7c326 Go FMT missing... 2015-08-11 19:01:37 +02:00
Cameron Sparr
b312e48d31 Revert "PR #59, implementation of multiple outputs"
This reverts commit 48a075529a, reversing
changes made to 924700f381.
2015-08-11 10:34:00 -06:00
Cameron Sparr
48a075529a PR #59, implementation of multiple outputs 2015-08-11 10:21:00 -06:00
Cameron Sparr
7f22211e4b Update changelog with PR #103 2015-08-11 10:19:29 -06:00
Simon Fraser
a63c3c8e0b Ensure tests pass now that we're passing fstype around
go fmt checks

Rework the example configuration snippets
2015-08-11 10:19:29 -06:00
Simon Fraser
bba162c55b to filter by filesystem type, we need to pass that up the chain 2015-08-11 10:19:29 -06:00
Simon Fraser
540ba6d6ae tag filtering description added 2015-08-11 10:19:29 -06:00
Simon Fraser
29e8ce68e4 Modify ShouldPass so that it checks the tags of a metric, if configured.
A plugin can have 'tagpass' and 'tagdrop' subsections:

[disk.tagpass]

And tagname = array lists of things to filter by:

fstype = [ "ext4", "xfs" ]
path = [ "/", /opt", "/home" ]

[disk.tagdrop]
path = [ "/" ]
2015-08-11 10:19:29 -06:00
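A minimal Go sketch of the filtering described above, with assumed semantics (tagpass whitelists first, then tagdrop blacklists); the identifiers are illustrative, not the repository's:

package main

import "fmt"

// matches reports whether any configured value for some tag key equals
// the metric's value for that key.
func matches(tags map[string]string, filter map[string][]string) bool {
	for key, values := range filter {
		if got, ok := tags[key]; ok {
			for _, v := range values {
				if got == v {
					return true
				}
			}
		}
	}
	return false
}

// shouldPass mirrors the [disk.tagpass] / [disk.tagdrop] sections above.
func shouldPass(tags map[string]string, tagpass, tagdrop map[string][]string) bool {
	if len(tagpass) > 0 && !matches(tags, tagpass) {
		return false
	}
	if len(tagdrop) > 0 && matches(tags, tagdrop) {
		return false
	}
	return true
}

func main() {
	tagpass := map[string][]string{"fstype": {"ext4", "xfs"}}
	tagdrop := map[string][]string{"path": {"/"}}
	fmt.Println(shouldPass(map[string]string{"fstype": "ext4", "path": "/home"}, tagpass, tagdrop)) // true
	fmt.Println(shouldPass(map[string]string{"fstype": "ext4", "path": "/"}, tagpass, tagdrop))     // false
}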
Nicolas
5691253acd Update Readme with the new filter option and add a usage chapter with --help 2015-08-11 18:18:52 +02:00
Simon Fraser
cd5c85a245 ShouldPass needs to know the tags being used 2015-08-11 10:13:55 -06:00
Cameron Sparr
7e1d1c19e6 Fix for issue #77, create telegraf database if not exists 2015-08-11 10:13:55 -06:00
Cameron Sparr
46cdb40800 Automate circleci package process 2015-08-11 10:09:26 -06:00
Cameron Sparr
e3c6101b93 Back to regular circle.yml, make and artifact linux binaries
Remove the circle-test.sh script because that environment was having
problems building all of gopsutil.
2015-08-11 10:09:26 -06:00
Simon Fraser
448aeb9c55 fix filename for logrotate config 2015-08-11 10:09:26 -06:00
Simon Fraser
5e55104aa6 Log rotation configuration file, and package.sh modifications to add it to deb and rpm 2015-08-11 10:09:26 -06:00
Cameron Sparr
03cd83dc82 Massive retro-active changelog update 2015-08-11 10:09:26 -06:00
Cameron Sparr
0cebae8e23 README long-line fixing and a couple typos 2015-08-11 10:09:26 -06:00
Cameron Sparr
38bbe7567a Fail and exit telegraf if no plugins are loaded, issue #26 2015-08-11 10:09:26 -06:00
mocchira
fc95e8401a Add LeoFS plugin 2015-08-11 10:09:26 -06:00
Cameron Sparr
b70f821a10 Revert "Add log rotation to /etc/logrotate.d for deb and rpm packages" 2015-08-11 10:09:25 -06:00
Cameron Sparr
95bb21f3f5 Using gvm & shell test file to manage circleci go environment 2015-08-11 10:09:25 -06:00
Alvaro Morales
94741d52ed Remove simplejson dependency in exec plugin 2015-08-11 10:09:25 -06:00
Cameron Sparr
1ac6da4a8b Fix for issue #93, just use github path instead of gopkg.in 2015-08-11 10:09:25 -06:00
Alvaro Morales
090c0a60fa Add exec plugin 2015-08-11 10:09:25 -06:00
nicolas
e7ca9113bc Add filtering options to select plugin at startup 2015-08-11 17:50:36 +02:00
Cameron Sparr
924700f381 Update changelog with PR #103 2015-08-10 19:22:39 -06:00
Simon Fraser
d280b968d7 Ensure tests pass now that we're passing fstype around
go fmt checks

Rework the example configuration snippets
2015-08-10 19:20:49 -06:00
Simon Fraser
1d8c7a74d6 to filter by filesystem type, we need to pass that up the chain 2015-08-10 19:19:36 -06:00
Simon Fraser
c1dc77c69c tag filtering description added 2015-08-10 19:19:35 -06:00
Simon Fraser
3ecb5a20a5 Modify ShouldPass so that it checks the tags of a metric, if configured.
A plugin can have 'tagpass' and 'tagdrop' subsections:

[disk.tagpass]

And tagname = array lists of things to filter by:

fstype = [ "ext4", "xfs" ]
path = [ "/", /opt", "/home" ]

[disk.tagdrop]
path = [ "/" ]
2015-08-10 19:19:35 -06:00
Simon Fraser
0c1460062d ShouldPass needs to know the tags being used 2015-08-10 19:19:35 -06:00
Cameron Sparr
c0cef8ca43 Fix for issue #77, create telegraf database if not exists 2015-08-10 16:33:18 -06:00
Cameron Sparr
a3e20ab2d6 Automate circleci package process 2015-08-10 13:41:05 -06:00
Cameron Sparr
7a23eb69eb Back to regular circle.yml, make and artifact linux binaries
Remove the circle-test.sh script because that environment was having
problems building all of gopsutil.
2015-08-10 10:47:32 -06:00
Simon Fraser
7da12dc324 fix filename for logrotate config 2015-08-08 23:04:42 +01:00
Simon Fraser
ed9b43e2cc Log rotation configuration file, and package.sh modifications to add it to deb and rpm 2015-08-08 22:10:32 +01:00
Cameron Sparr
2cd56e43a8 Massive retro-active changelog update 2015-08-07 15:41:54 -06:00
JP
91f6c4b740 move tags to influxdb struct, update all sample configs 2015-08-07 15:31:25 -05:00
Cameron Sparr
c0249caef9 README long-line fixing and a couple typos 2015-08-07 09:19:45 -06:00
Cameron Sparr
e0d0bc0966 Fail and exit telegraf if no plugins are loaded, issue #26 2015-08-07 09:00:40 -06:00
mocchira
24eb7d6bc9 Add LeoFS plugin 2015-08-07 08:58:24 +00:00
JP
48c10f9454 update config sample, marshal tags from toml 2015-08-06 21:03:27 -05:00
Cameron Sparr
d9b208260e Merge pull request #96 from influxdb/revert-87-logrotation
Revert "Add log rotation to /etc/logrotate.d for deb and rpm packages"
2015-08-06 14:11:56 -06:00
Cameron Sparr
5dd16399b3 Revert "Add log rotation to /etc/logrotate.d for deb and rpm packages" 2015-08-06 14:10:20 -06:00
Cameron Sparr
96014f8e94 Merge pull request #92 from Asana/exec
Add exec plugin
2015-08-06 13:21:12 -06:00
Cameron Sparr
5dd14f2ee2 Using gvm & shell test file to manage circleci go environment 2015-08-06 13:03:41 -06:00
Alvaro Morales
ad2e0bc4e3 Remove simplejson dependency in exec plugin 2015-08-06 12:01:42 -07:00
Cameron Sparr
85c61fb684 Fix for issue #93, just use github path instead of gopkg.in 2015-08-06 11:59:07 -06:00
JP
2601a09a83 resolve remaining build errors 2015-08-06 12:00:03 -05:00
JP
d318ef6df7 resolve go vet issues 2015-08-06 11:52:46 -05:00
JP
7ed19de44e fix issue with var rename 2015-08-06 11:49:02 -05:00
JP
72652ff16e resolve merge conflicts 2015-08-05 21:37:18 -05:00
JP
4a12471918 convert influxdb output to multiple outputs 2015-08-05 21:25:14 -05:00
Alvaro Morales
32cbbdbf73 Add exec plugin 2015-08-05 17:51:44 -07:00
Cameron Sparr
ab28707d71 Marking disque tests 'short', circleci container doesn't appear to support tcp? 2015-08-05 17:00:04 -06:00
Cameron Sparr
42a7203b1e Skip per-cpu unit test when in a circle ci container 2015-08-05 16:49:40 -06:00
Cameron Sparr
5259c50612 Mark more unit tests as 'integration' tests when they rely on external services/docker 2015-08-05 16:49:40 -06:00
Cameron Sparr
06a84def5f Merge pull request #71 from kureikain/haproxy_plugin
HAProxy plugin
2015-08-05 15:47:59 -06:00
Codeb Fan
d7bda01ccb Add Nginx plugin (ngx_http_stub_status_module)
Add plugin to collect Nginx basic status information (ngx_http_stub_status_module).
http://nginx.org/en/docs/http/ngx_http_stub_status_module.html
2015-08-05 15:33:28 -06:00
Cameron Sparr
890b2453f8 Adding Disque, Lustre, and memcached to the list of supported plugins 2015-08-05 15:19:58 -06:00
Cameron Sparr
df9e1669cf Merge pull request #76 from kotopes/redis-port-tag
add tag "port" to every redis metric
2015-08-05 15:01:36 -06:00
Cameron Sparr
8b491a46f3 Merge branch 'gfloyd-disque-plugin' 2015-08-05 14:47:26 -06:00
Cameron Sparr
c698dc9784 Build & unit test fixup 2015-08-05 14:47:12 -06:00
Cameron Sparr
77dd1e3d45 Adding Kafka docker container and utilizing it in unit tests 2015-08-05 14:46:31 -06:00
Cameron Sparr
b3cb8d0f53 Verify proper go formatting in circleci job 2015-08-05 14:46:31 -06:00
Cameron Sparr
260fc43281 go fmt fixes 2015-08-05 14:46:31 -06:00
Cameron Sparr
b4ef7bb3ed Adding circleci build badge 2015-08-05 14:46:30 -06:00
Simon Fraser
816313de30 Fix 'go vet' error, +build comment must be followed by a blank line 2015-08-05 14:46:30 -06:00
Cameron Sparr
bb7bdffada Creating circleci job to just lint and vet code 2015-08-05 14:46:30 -06:00
Simon Fraser
0647666c65 Add default log rotation 2015-08-05 14:46:30 -06:00
Simon Fraser
8255945ea7 Tests for the lustre plugin, initial commit 2015-08-05 14:46:30 -06:00
Simon Fraser
2364595697 Require validation for uint64 as well as int64 2015-08-05 14:46:30 -06:00
Simon Fraser
e442d754d0 Lustre filesystem plugin (http://lustre.org/)
The configuration allows users to override the /proc/ files
scanned for data, since that has been known to change with lustre
versions.
2015-08-05 14:46:30 -06:00
Simon Fraser
6b510652ed Add Lustre 2 plugin 2015-08-05 14:46:30 -06:00
Cameron Sparr
9ea5a88f84 Fix GetLocalHost testutil function for mac users (boot2docker) 2015-08-05 14:46:30 -06:00
Cameron Sparr
aa0adc98f9 Build & unit test fixup 2015-08-04 16:48:19 -06:00
Cameron Sparr
fdd2401f7b Adding Kafka docker container and utilizing it in unit tests 2015-08-04 16:30:05 -06:00
Cameron Sparr
6b820d91ae Verify proper go formatting in circleci job 2015-08-04 16:14:17 -06:00
Cameron Sparr
611ad26d1b go fmt fixes 2015-08-04 16:09:59 -06:00
Cameron Sparr
0911b5b2e8 Adding circleci build badge 2015-08-04 15:04:34 -06:00
Cameron Sparr
c660ff80bf Merge pull request #86 from srfraser/lustre2-plugin
Lustre2 plugin
2015-08-04 15:00:16 -06:00
Simon Fraser
ef098923d6 Fix 'go vet' error, +build comment must be followed by a blank line 2015-08-04 21:44:15 +01:00
Simon Fraser
a4f7ffea3f Merge branch 'master' of https://github.com/influxdb/telegraf into lustre2-plugin
Conflicts:
	testutil/accumulator.go
2015-08-04 21:39:17 +01:00
Cameron Sparr
c5deb9d557 Merge pull request #87 from srfraser/logrotation
Add log rotation to /etc/logrotate.d for deb and rpm packages
2015-08-04 14:14:17 -06:00
Cameron Sparr
3ff2ea8d4e Creating circleci job to just lint and vet code 2015-08-04 11:22:26 -06:00
Simon Fraser
85ecee3525 Add default log rotation 2015-08-04 15:30:27 +01:00
Simon Fraser
d09e5f37ab Tests for the lustre plugin, initial commit 2015-08-04 14:54:50 +01:00
Simon Fraser
7edcd7aaf5 Require validation for uint64 as well as int64 2015-08-04 14:53:45 +01:00
Simon Fraser
7def1364b5 Lustre filesystem plugin (http://lustre.org/)
The configuration allows users to override the /proc/ files
scanned for data, since that has been known to change with lustre
versions.
2015-08-04 13:48:09 +01:00
Simon Fraser
3b588f0502 Add Lustre 2 plugin 2015-08-04 13:47:50 +01:00
Cameron Sparr
03c520798e Fix GetLocalHost testutil function for mac users (boot2docker) 2015-08-03 21:01:52 -06:00
Graham Floyd
c0fa6af51b Add disque plugin 2015-07-31 14:46:46 -05:00
Todd Persen
a4d0c47fc6 Merge pull request #49 from marcosnils/container_services
Container services
2015-07-30 16:29:44 -07:00
Vinh
5bf00e87cc Add haproxy plugin 2015-07-22 17:14:31 -07:00
Evgeny Kulev
014ddd76f4 add tag "port" to every redis metric
see issue https://github.com/influxdb/telegraf/issues/74
2015-07-23 00:55:03 +03:00
Evan Phoenix
6eb4bdcf0e Merge pull request #53 from alvaromorales/rethinkdb-fix
Add rethinkdb plugin to all.go
2015-07-21 13:37:55 -07:00
Evan Phoenix
b4e032d9c9 Merge pull request #54 from jipperinbham/mongodb-plugin
add MongoDB plugin
2015-07-21 13:37:44 -07:00
Evan Phoenix
4ca39dfd1e Merge pull request #55 from brocaar/elasticsearch_plugin
Implement Elasticsearch plugin
2015-07-21 13:34:31 -07:00
Evan Phoenix
15ef62747a Merge pull request #60 from brocaar/connection_timeout
Add connection timeout configuration for InfluxDB.
2015-07-21 13:32:26 -07:00
Evan Phoenix
ad6dcb478d Merge pull request #63 from bewiwi/master
Fix redis: change ending call with "\r\n"
2015-07-21 13:31:51 -07:00
Evan Phoenix
0b7aa65dbf Merge pull request #64 from vic3lord/systemd_support
systemd unit support
2015-07-21 13:31:27 -07:00
Evan Phoenix
e484d4bbf4 Merge pull request #72 from vadimtk/master
Add TokuDB metrics to MySQL plugin
2015-07-21 13:27:13 -07:00
Evan Phoenix
e6ff9c6cd5 Merge pull request #73 from ianunruh/plugin/rabbitmq
Add simple RabbitMQ plugin
2015-07-21 13:26:59 -07:00
Ian Unruh
fad63b28d1 Add simple RabbitMQ plugin 2015-07-21 11:48:49 -07:00
Vadim Tkachenko
7a075e091d Add TokuDB metrics to MySQL plugin 2015-07-19 13:01:45 -07:00
Or Elimelech
0df4708267 systemd unit support 2015-07-14 21:42:33 +03:00
Loïc
d5b4e4ba60 Fix redis: change ending call with "\r\n" 2015-07-13 18:21:16 +02:00
Orne Brocaar
b717dc0742 Use string for InfluxDB timeout duration config. 2015-07-12 18:05:44 +02:00
Orne Brocaar
6ad37267e4 Add connection timeout configuration for InfluxDB. 2015-07-10 20:17:26 +02:00
Orne Brocaar
22d4d1fb42 Fix typo (tranport > transport). 2015-07-10 09:00:28 +02:00
JP
98b0543b26 fix merge conflicts 2015-07-09 15:09:43 -05:00
JP
c0512e720c add SSL support, change tag to hostname 2015-07-09 15:06:18 -05:00
Orne Brocaar
0f6664b260 Remove the note that it only reads indices stats. 2015-07-09 21:02:19 +02:00
Orne Brocaar
f76f99e789 Merge remote-tracking branch 'upstream/master' into elasticsearch_plugin 2015-07-09 21:01:06 +02:00
Orne Brocaar
e2d48f42cc Cleanup repeated logic. 2015-07-09 20:58:54 +02:00
Orne Brocaar
ec138cae62 Remove indices filter. 2015-07-09 20:53:54 +02:00
Orne Brocaar
986b89f5ed Cleanup tests. 2015-07-09 20:46:42 +02:00
Orne Brocaar
d799011039 Implement breakers stats. 2015-07-09 20:43:52 +02:00
Orne Brocaar
0faa1c886a Implement http stats. 2015-07-09 20:38:51 +02:00
Orne Brocaar
cb839d0fe8 Implement transport stats. 2015-07-09 20:36:22 +02:00
Orne Brocaar
ec4079733e Implement fs stats. 2015-07-09 20:32:56 +02:00
Orne Brocaar
4743c9ab16 Implement network stats. 2015-07-09 20:23:04 +02:00
Todd Persen
c4e5e743c4 Update README.md 2015-07-09 12:22:10 -06:00
Todd Persen
1d23681efe Update README.md 2015-07-09 12:20:23 -06:00
Todd Persen
56d49f2f4f Update CHANGELOG.md 2015-07-09 12:19:49 -06:00
Orne Brocaar
ac54b7cdd1 Implement thread-pool stats. 2015-07-09 20:18:24 +02:00
Todd Persen
efe2771a34 Merge pull request #56 from EmilS/plugins/kafka-consumer-readme
Adds README for Kafka consumer plugin
2015-07-09 12:13:10 -06:00
Orne Brocaar
10c4ec74cc Implement JVM stats. 2015-07-09 20:11:46 +02:00
Orne Brocaar
d90026646f Implement process stats. 2015-07-09 20:06:30 +02:00
Orne Brocaar
9cd1344740 Implement os stats. 2015-07-09 20:01:59 +02:00
Orne Brocaar
c6a9335bf2 Refactor parsing "indices" stats. 2015-07-09 19:51:51 +02:00
Orne Brocaar
6c87148cd4 Add node-id and node attributes to tags. 2015-07-09 18:41:16 +02:00
Orne Brocaar
3f6c46e1ec Add node_name to tags. 2015-07-08 23:07:10 +02:00
Emil Stolarsky
b3c13b7aef Adds README for Kafka consumer plugin 2015-07-08 15:45:02 -04:00
Orne Brocaar
55cfd5c904 Check that the API response is 200. 2015-07-08 21:28:25 +02:00
Orne Brocaar
d38f2223a5 Implement Elasticsearch plugin (indices stats). 2015-07-08 21:14:51 +02:00
JP
86145d5eb5 add MongoDB plugin 2015-07-07 11:25:34 -05:00
Marcos Lilljedahl
ef335d9fd7 Add missing files 2015-07-06 22:23:24 -03:00
Marcos Lilljedahl
d2810ddc95 Add DOCKER_HOST support for tests
This allows tests to run in environments where DOCKER_HOST is used, which
is extremely helpful when using boot2docker to run docker
2015-07-06 22:18:31 -03:00
Alvaro Morales
037c43cd25 Add rethinkdb plugin to all.go. 2015-07-06 17:27:09 -07:00
Marcos Lilljedahl
aa86c16838 Add --no-recreate option to prepare target 2015-07-06 21:17:44 -03:00
Todd Persen
ed16a84e0d Merge pull request #50 from jseriff/master
update init.sh to use telegraf directories
2015-07-06 16:12:36 -07:00
Todd Persen
530a60a52d Merge pull request #52 from benfb/master
use influxdb/telegraf instead of influxdb/influxdb in changelog
2015-07-06 16:11:21 -07:00
Ben Bailey
48463681fb use influxdb/telegraf instead of influxdb/influxdb in changelog 2015-07-06 17:26:31 -05:00
jseriff
f5a8739b7c update init.sh to use telegraf directories
init.sh should use telegraf directories that are established in the package.sh as of 120218f
2015-07-06 11:07:06 -05:00
Marcos Lilljedahl
4471e2bdbb Use postgres default configuration 2015-07-06 03:46:53 -03:00
Marcos Lilljedahl
63552282d7 Remove circle ci implementation due to Golang bug.
I've tried to set up circleci but I came across a Golang issue that was
preventing CPU tests from passing. Instead of ignoring those tests I've
submitted an issue to Go (https://github.com/golang/go/issues/11609)
hoping that this gets fixed soon.
2015-07-06 03:38:08 -03:00
Marcos Lilljedahl
ae385b336d Remove unnecessary circleci configuration as we're using default
provided services

Update test users to use circleci default services
2015-07-06 02:20:25 -03:00
Marcos Lilljedahl
0db55007ab Add circleci script 2015-07-06 01:51:13 -03:00
Marcos Lilljedahl
d545b197ea Add docker containers to test services.
This commit initializes the needed services which are not mocked
so tests can be executed in any environment with docker.

Some default modifications (i.e: connection strings) were also made to
current tests to accommodate them for this setup.

A docker-compose.yml file is provided with all the necessary parameters
for these services to be initialized. Future services can be added
easily by extending this configuration file.

In addition, a makefile has been introduced to simplify command execution
2015-07-06 01:46:43 -03:00
Todd Persen
bbc6fa57fa Update README.md for v0.1.3 2015-07-05 19:16:08 -07:00
1827 changed files with 395359 additions and 2669 deletions

2
.gitignore vendored

@@ -1,3 +1,3 @@
pkg/
tivan
.vagrant
telegraf


@@ -1,27 +1,248 @@
## v0.2.2 [unreleased]
### Release Notes
### Features
### Bugfixes
## v0.2.1 [2015-11-16]
### Release Notes
- Telegraf will no longer use docker-compose for "long" unit tests; they have
been changed to just run docker commands in the Makefile. See `make docker-run`
and `make docker-kill`. `make test` will still run all unit tests with docker.
- Long unit tests are now run in CircleCI, with docker & race detector
- Redis plugin tag has changed from `host` to `server`
- HAProxy plugin tag has changed from `host` to `server`
- UDP output now supported
- Telegraf will now compile on FreeBSD
- Users can now specify outputs as lists, specifying multiple outputs of the
same type.
### Features
- [#325](https://github.com/influxdb/telegraf/pull/325): NSQ output. Thanks @jrxFive!
- [#318](https://github.com/influxdb/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter!
- [#338](https://github.com/influxdb/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac!
- [#337](https://github.com/influxdb/telegraf/pull/337): Jolokia plugin, thanks @saiello!
- [#350](https://github.com/influxdb/telegraf/pull/350): Amon output.
- [#365](https://github.com/influxdb/telegraf/pull/365): Twemproxy plugin by @codeb2cc
- [#317](https://github.com/influxdb/telegraf/issues/317): ZFS plugin, thanks @cornerot!
- [#364](https://github.com/influxdb/telegraf/pull/364): Support InfluxDB UDP output.
- [#370](https://github.com/influxdb/telegraf/pull/370): Support specifying multiple outputs, as lists.
- [#372](https://github.com/influxdb/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC!
### Bugfixes
- [#331](https://github.com/influxdb/telegraf/pull/331): Don't overwrite host tag in redis plugin.
- [#336](https://github.com/influxdb/telegraf/pull/336): Mongodb plugin should take 2 measurements.
- [#351](https://github.com/influxdb/telegraf/issues/351): Fix continual "CREATE DATABASE" in writes
- [#360](https://github.com/influxdb/telegraf/pull/360): Apply prefix before ShouldPass check. Thanks @sotfo!
## v0.2.0 [2015-10-27]
### Release Notes
- The -test flag will now only output 2 collections for plugins that need it
- There is a new agent configuration option: `flush_interval`. This option tells
Telegraf how often to flush data to InfluxDB and other output sinks. For example,
users can set `interval = "2s"` and `flush_interval = "60s"` for Telegraf to
collect data every 2 seconds, and flush every 60 seconds.
- `precision` and `utc` are no longer valid agent config values. `precision` has
moved to the `influxdb` output config, where it will continue to default to "s"
- debug and test output will now print the raw line-protocol string
- Telegraf will now, by default, round the collection interval to the nearest
even interval. This means that `interval="10s"` will collect every :00, :10, etc.
To ease scale concerns, flushing will be "jittered" by a random amount so that
all Telegraf instances do not flush at the same time. Both of these options can
be controlled via the `round_interval` and `flush_jitter` config options.
- Telegraf will now retry metric flushes twice
### Features
- [#205](https://github.com/influxdb/telegraf/issues/205): Include per-db redis keyspace info
- [#226](https://github.com/influxdb/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. Thanks @ekini
- [#90](https://github.com/influxdb/telegraf/issues/90): Add Docker labels to tags in docker plugin
- [#223](https://github.com/influxdb/telegraf/pull/223): Add port tag to nginx plugin. Thanks @neezgee!
- [#227](https://github.com/influxdb/telegraf/pull/227): Add command intervals to exec plugin. Thanks @jpalay!
- [#241](https://github.com/influxdb/telegraf/pull/241): MQTT Output. Thanks @shirou!
- Memory plugin: cached and buffered measurements re-added
- Logging: additional logging for each collection interval, tracking the number
of metrics collected and the number of plugins they came from.
- [#240](https://github.com/influxdb/telegraf/pull/240): procstat plugin, thanks @ranjib!
- [#244](https://github.com/influxdb/telegraf/pull/244): netstat plugin, thanks @shirou!
- [#262](https://github.com/influxdb/telegraf/pull/262): zookeeper plugin, thanks @jrxFive!
- [#237](https://github.com/influxdb/telegraf/pull/237): statsd service plugin, thanks @sparrc
- [#273](https://github.com/influxdb/telegraf/pull/273): puppet agent plugin, thanks @jrxFive!
- [#280](https://github.com/influxdb/telegraf/issues/280): Use InfluxDB client v2.
- [#281](https://github.com/influxdb/telegraf/issues/281): Eliminate need to deep copy Batch Points.
- [#286](https://github.com/influxdb/telegraf/issues/286): bcache plugin, thanks @cornerot!
- [#287](https://github.com/influxdb/telegraf/issues/287): Batch AMQP output, thanks @ekini!
- [#301](https://github.com/influxdb/telegraf/issues/301): Collect on even intervals
- [#298](https://github.com/influxdb/telegraf/pull/298): Support retrying output writes
- [#300](https://github.com/influxdb/telegraf/issues/300): aerospike plugin. Thanks @oldmantaiter!
- [#322](https://github.com/influxdb/telegraf/issues/322): Librato output. Thanks @jipperinbham!
### Bugfixes
- [#228](https://github.com/influxdb/telegraf/pull/228): New version of package will replace old one. Thanks @ekini!
- [#232](https://github.com/influxdb/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime!
- [#261](https://github.com/influxdb/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini!
- [#245](https://github.com/influxdb/telegraf/issues/245): Document Exec plugin example. Thanks @ekini!
- [#264](https://github.com/influxdb/telegraf/issues/264): logrotate config file fixes. Thanks @linsomniac!
- [#290](https://github.com/influxdb/telegraf/issues/290): Fix some plugins sending their values as strings.
- [#289](https://github.com/influxdb/telegraf/issues/289): Fix accumulator panic on nil tags.
- [#302](https://github.com/influxdb/telegraf/issues/302): Fix `[tags]` getting applied, thanks @gotyaoi!
## v0.1.9 [2015-09-22]
### Release Notes
- InfluxDB output config change: `url` is now `urls`, and is a list. Config files
will still be backwards compatible if only `url` is specified.
- The -test flag will now output two metric collections
- Support for filtering telegraf outputs on the CLI -- Telegraf will now
allow filtering of output sinks on the command-line using the `-outputfilter`
flag, much like how the `-filter` flag works for plugins.
- Support for filtering on config-file creation -- Telegraf now supports
passing filters to the -sample-config command. You can now run
`telegraf -sample-config -filter cpu -outputfilter influxdb` to get a config
file with only the cpu plugin defined, and the influxdb output defined.
- **Breaking Change**: The CPU collection plugin has been refactored to fix some
bugs and outdated dependency issues. At the same time, I also decided to fix
a naming consistency issue, so cpu_percentageIdle will become cpu_usage_idle.
Also, all CPU time measurements now indicate "time" in their name, so cpu_idle
will become cpu_time_idle. Additionally, cpu_time measurements are going to be
dropped in the default config.
- **Breaking Change**: The memory plugin has been refactored and some measurements
have been renamed for consistency. Some measurements have also been removed from
the output. They are still collected by gopsutil, and could easily be re-added
in a "verbose" mode if there is demand for it.
### Features
- [#143](https://github.com/influxdb/telegraf/issues/143): InfluxDB clustering support
- [#181](https://github.com/influxdb/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye!
- [#200](https://github.com/influxdb/telegraf/pull/200): AMQP output. Thanks @ekini!
- [#182](https://github.com/influxdb/telegraf/pull/182): OpenTSDB output. Thanks @rplessl!
- [#187](https://github.com/influxdb/telegraf/pull/187): Retry output sink connections on startup.
- [#220](https://github.com/influxdb/telegraf/pull/220): Add port tag to apache plugin. Thanks @neezgee!
- [#217](https://github.com/influxdb/telegraf/pull/217): Add filtering for output sinks
and filtering when specifying a config file.
### Bugfixes
- [#170](https://github.com/influxdb/telegraf/issues/170): Systemd support
- [#175](https://github.com/influxdb/telegraf/issues/175): Set write precision before gathering metrics
- [#178](https://github.com/influxdb/telegraf/issues/178): redis plugin, multiple server thread hang bug
- Fix net plugin on darwin
- [#84](https://github.com/influxdb/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee!
- [#189](https://github.com/influxdb/telegraf/pull/189): Fix mem_used_perc. Thanks @mced!
- [#192](https://github.com/influxdb/telegraf/issues/192): Increase compatibility of postgresql plugin. Now supports versions 8.1+
- [#203](https://github.com/influxdb/telegraf/issues/203): EL5 rpm support. Thanks @ekini!
- [#206](https://github.com/influxdb/telegraf/issues/206): CPU steal/guest values wrong on linux.
- [#212](https://github.com/influxdb/telegraf/issues/212): Add hashbang to postinstall script. Thanks @ekini!
- [#212](https://github.com/influxdb/telegraf/issues/212): Fix makefile warning. Thanks @ekini!
## v0.1.8 [2015-09-04]
### Release Notes
- Telegraf will now write data in UTC at second precision by default
- Now using Go 1.5 to build telegraf
### Features
- [#150](https://github.com/influxdb/telegraf/pull/150): Add Host Uptime metric to system plugin
- [#158](https://github.com/influxdb/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4
- [#159](https://github.com/influxdb/telegraf/pull/159): Use second precision for InfluxDB writes
- [#165](https://github.com/influxdb/telegraf/pull/165): Add additional metrics to mysql plugin. Thanks @nickscript0
- [#162](https://github.com/influxdb/telegraf/pull/162): Write UTC by default, provide option
- [#166](https://github.com/influxdb/telegraf/pull/166): Upload binaries to S3
- [#169](https://github.com/influxdb/telegraf/pull/169): Ping plugin
### Bugfixes
## v0.1.7 [2015-08-28]
### Features
- [#38](https://github.com/influxdb/telegraf/pull/38): Kafka output producer.
- [#133](https://github.com/influxdb/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0!
- [#136](https://github.com/influxdb/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin.
- [#137](https://github.com/influxdb/telegraf/issues/137): Memcached: fix when a value contains a space
- [#138](https://github.com/influxdb/telegraf/issues/138): MySQL server address tag.
- [#142](https://github.com/influxdb/telegraf/pull/142): Add Description and SampleConfig funcs to output interface
- Indent the toml config file for readability
### Bugfixes
- [#128](https://github.com/influxdb/telegraf/issues/128): system_load measurement missing.
- [#129](https://github.com/influxdb/telegraf/issues/129): Latest pkg url fix.
- [#131](https://github.com/influxdb/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra!
- [#140](https://github.com/influxdb/telegraf/issues/140): Memory plugin prec->perc typo fix. Thanks @brunoqc!
## v0.1.6 [2015-08-20]
### Features
- [#112](https://github.com/influxdb/telegraf/pull/112): Datadog output. Thanks @jipperinbham!
- [#116](https://github.com/influxdb/telegraf/pull/116): Use godep to vendor all dependencies
- [#120](https://github.com/influxdb/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales!
### Bugfixes
- [#113](https://github.com/influxdb/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility
- [#118](https://github.com/influxdb/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser!
- [#122](https://github.com/influxdb/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser!
- [#126](https://github.com/influxdb/telegraf/issues/126): Nginx plugin not catching net.SplitHostPort error
## v0.1.5 [2015-08-13]
### Features
- [#54](https://github.com/influxdb/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham!
- [#55](https://github.com/influxdb/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar!
- [#71](https://github.com/influxdb/telegraf/pull/71): HAProxy plugin. Thanks @kureikain!
- [#72](https://github.com/influxdb/telegraf/pull/72): Adding TokuDB metrics to MySQL. Thanks @vadimtk!
- [#73](https://github.com/influxdb/telegraf/pull/73): RabbitMQ plugin. Thanks @ianunruh!
- [#77](https://github.com/influxdb/telegraf/issues/77): Automatically create database.
- [#79](https://github.com/influxdb/telegraf/pull/79): Nginx plugin. Thanks @codeb2cc!
- [#86](https://github.com/influxdb/telegraf/pull/86): Lustre2 plugin. Thanks @srfraser!
- [#91](https://github.com/influxdb/telegraf/pull/91): Unit testing
- [#92](https://github.com/influxdb/telegraf/pull/92): Exec plugin. Thanks @alvaromorales!
- [#98](https://github.com/influxdb/telegraf/pull/98): LeoFS plugin. Thanks @mocchira!
- [#103](https://github.com/influxdb/telegraf/pull/103): Filter by metric tags. Thanks @srfraser!
- [#106](https://github.com/influxdb/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet!
- [#107](https://github.com/influxdb/telegraf/pull/107): Multiple outputs beyond influxdb. Thanks @jipperinbham!
- [#108](https://github.com/influxdb/telegraf/issues/108): Support setting per-CPU and total-CPU gathering.
- [#111](https://github.com/influxdb/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay!
### Bugfixes
- [#85](https://github.com/influxdb/telegraf/pull/85): Fix GetLocalHost testutil function for mac users
- [#89](https://github.com/influxdb/telegraf/pull/89): go fmt fixes
- [#94](https://github.com/influxdb/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama
- [#101](https://github.com/influxdb/telegraf/issues/101): switch back from master branch if building locally
- [#99](https://github.com/influxdb/telegraf/issues/99): update integer output to new InfluxDB line protocol format
## v0.1.4 [2015-07-09]
### Features
- [#56](https://github.com/influxdb/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS!
### Bugfixes
- [#50](https://github.com/influxdb/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff!
- [#52](https://github.com/influxdb/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb!
## v0.1.3 [2015-07-05]
### Features
- [#35](https://github.com/influxdb/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS!
- [#47](https://github.com/influxdb/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham!
### Bugfixes
- [#45](https://github.com/influxdb/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz!
- [#43](https://github.com/influxdb/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils!
## v0.1.2 [2015-07-01]
### Features
- [#12](https://github.com/influxdb/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit!
- [#14](https://github.com/influxdb/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to.
- [#16](https://github.com/influxdb/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham!
- [#21](https://github.com/influxdb/telegraf/pull/21): Add memcached plugin. Thanks @Yukki!
### Bugfixes
- [#13](https://github.com/influxdb/telegraf/pull/13): Fix the packaging script.
- [#19](https://github.com/influxdb/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain!
- [#20](https://github.com/influxdb/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros!
- [#23](https://github.com/influxdb/telegraf/pull/23): Change name of folder for packages. Thanks @colinrymer!
- [#32](https://github.com/influxdb/telegraf/pull/32): Fix spelling of memoory -> memory. Thanks @tylernisonoff!
## v0.1.1 [2015-06-19]

293
CONTRIBUTING.md Normal file

@@ -0,0 +1,293 @@
## Sign the CLA
Before we can merge a pull request, you will need to sign the CLA,
which can be found [on our website](http://influxdb.com/community/cla.html)
## Plugins
This section is for developers who want to create new collection plugins.
Telegraf is entirely plugin-driven. This interface allows operators to
pick and choose what is gathered, and makes it easy for developers
to create new ways of generating metrics.
Plugin authorship is kept as simple as possible to encourage people to develop
and submit new plugins.
### Plugin Guidelines
* A plugin must conform to the `plugins.Plugin` interface.
* Each generated metric automatically has the name of the plugin that generated
it prepended. This is to keep plugins honest.
* Plugins should call `plugins.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdb/telegraf/plugins/all/all.go` file (see the sketch after this list).
* The `SampleConfig` function should return valid toml that describes how the
plugin can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this plugin does.
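Registration in all.go is just an underscore import, since plugins register
themselves in their `init` functions. A minimal sketch (the `simple` plugin
path here is hypothetical):

```go
// plugins/all/all.go (sketch)
package all

import (
	_ "github.com/influxdb/telegraf/plugins/simple" // hypothetical plugin package
)
```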
### Plugin interface
```go
type Plugin interface {
	SampleConfig() string
	Description() string
	Gather(Accumulator) error
}

type Accumulator interface {
	Add(measurement string,
		value interface{},
		tags map[string]string,
		timestamp ...time.Time)
	AddFields(measurement string,
		fields map[string]interface{},
		tags map[string]string,
		timestamp ...time.Time)
}
```
### Accumulator
The way that a plugin emits metrics is by interacting with the Accumulator.
The `Add` function takes three required arguments (plus an optional timestamp):
* **measurement**: A string description of the metric. For instance `bytes_read` or `faults`.
* **value**: A value for the metric. This accepts 5 different types of value:
* **int**: The most common type. All int types are accepted but favor using `int64`
Useful for counters, etc.
* **float**: Favor `float64`, useful for gauges, percentages, etc.
* **bool**: `true` or `false`, useful to indicate the presence of a state. `light_on`, etc.
* **string**: Typically used to indicate a message, or some kind of freeform information.
* **time.Time**: Useful for indicating when a state last occurred, for instance `light_on_since`.
* **tags**: This is a map of strings to strings that describes where the metric
came from or what it belongs to. For instance, the `net` plugin adds a tag named `"interface"`
set to the name of the network interface, like `"eth0"`.
The `AddFields` function allows multiple values to be passed for a single point. The
values use the same type profile as **value** above. The **timestamp** argument
allows a point to be registered as having occurred at an arbitrary time.
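As a hedged sketch before the fuller example below, emitting several related
values as a single point with an explicit timestamp via `AddFields` might look
like this (the measurement, field, and tag names are invented):

```go
func Gather(acc plugins.Accumulator) error {
	acc.AddFields("io",
		map[string]interface{}{
			"read_bytes":  int64(4096),
			"write_bytes": int64(1024),
		},
		map[string]string{"interface": "eth0"},
		time.Now()) // one timestamp covers both field values
	return nil
}
```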
Let's say you've written a plugin that emits metrics about processes on the current host.
```go
type Process struct {
	CPUTime     float64
	MemoryBytes int64
	PID         int
}

func Gather(acc plugins.Accumulator) error {
	for _, process := range system.Processes() {
		tags := map[string]string{
			"pid": fmt.Sprintf("%d", process.Pid),
		}
		acc.Add("cpu", process.CPUTime, tags, time.Now())
		acc.Add("memory", process.MemoryBytes, tags, time.Now())
	}
	return nil // the function must return an error value
}
```
### Plugin Example
```go
package simple

// simple.go
import "github.com/influxdb/telegraf/plugins"

type Simple struct {
	Ok bool
}

func (s *Simple) Description() string {
	return "a demo plugin"
}

func (s *Simple) SampleConfig() string {
	return "ok = true # indicate if everything is fine"
}

func (s *Simple) Gather(acc plugins.Accumulator) error {
	if s.Ok {
		acc.Add("state", "pretty good", nil)
	} else {
		acc.Add("state", "not great", nil)
	}
	return nil
}

func init() {
	plugins.Add("simple", func() plugins.Plugin { return &Simple{} })
}
```
## Service Plugins
This section is for developers who want to create new "service" collection
plugins. A service plugin differs from a regular plugin in that it operates
a background service while Telegraf is running. One example would be the `statsd`
plugin, which operates a statsd server.
Service Plugins are substantially more complicated than a regular plugin, as they
will require goroutines and locks to protect shared data. Service Plugins should
be avoided unless there is no way to create their behavior with a regular plugin.
Their interface is quite similar to a regular plugin, with the addition of `Start()`
and `Stop()` methods.
### Service Plugin Guidelines
* Same as the `Plugin` guidelines, except that they must conform to the
`plugins.ServicePlugin` interface.
### Service Plugin interface
```go
type ServicePlugin interface {
	SampleConfig() string
	Description() string
	Gather(Accumulator) error
	Start() error
	Stop()
}
```
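As a sketch only, here is a minimal service plugin that counts UDP packets in
the background; the package name, address, and measurement are invented for
this example (a real service plugin such as `statsd` does considerably more):

```go
package echo

// echo.go -- sketch: a background UDP listener counting received packets.
import (
	"net"
	"sync"

	"github.com/influxdb/telegraf/plugins"
)

type Echo struct {
	sync.Mutex
	conn  *net.UDPConn
	count int64
}

func (e *Echo) Description() string  { return "a demo service plugin" }
func (e *Echo) SampleConfig() string { return "" }

// Start launches the background service; it runs for as long as Telegraf does.
func (e *Echo) Start() error {
	addr, err := net.ResolveUDPAddr("udp", ":8125") // invented address
	if err != nil {
		return err
	}
	if e.conn, err = net.ListenUDP("udp", addr); err != nil {
		return err
	}
	go e.listen()
	return nil
}

func (e *Echo) listen() {
	buf := make([]byte, 1024)
	for {
		if _, _, err := e.conn.ReadFromUDP(buf); err != nil {
			return // the connection was closed by Stop()
		}
		e.Lock()
		e.count++
		e.Unlock()
	}
}

// Gather reports whatever the background service has accumulated so far.
func (e *Echo) Gather(acc plugins.Accumulator) error {
	e.Lock()
	defer e.Unlock()
	acc.Add("packets", e.count, nil)
	return nil
}

func (e *Echo) Stop() {
	e.conn.Close()
}

func init() {
	plugins.Add("echo", func() plugins.Plugin { return &Echo{} })
}
```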
## Outputs
This section is for developers who want to create a new output sink. Outputs
are created in a similar manner as collection plugins, and their interface has
similar constructs.
### Output Guidelines
* An output must conform to the `outputs.Output` interface.
* Outputs should call `outputs.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, outputs must add themselves to the
`github.com/influxdb/telegraf/outputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
output can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this output does.
### Output interface
```go
type Output interface {
	Connect() error
	Close() error
	Description() string
	SampleConfig() string
	Write(points []*client.Point) error
}
```
### Output Example
```go
package simpleoutput

// simpleoutput.go
import (
	"github.com/influxdb/influxdb/client/v2" // provides client.Point, used by Write
	"github.com/influxdb/telegraf/outputs"
)

type Simple struct {
	Ok bool
}

func (s *Simple) Description() string {
	return "a demo output"
}

func (s *Simple) SampleConfig() string {
	return "url = localhost"
}

func (s *Simple) Connect() error {
	// Make a connection to the URL here
	return nil
}

func (s *Simple) Close() error {
	// Close the connection to the URL here
	return nil
}

func (s *Simple) Write(points []*client.Point) error {
	for _, pt := range points {
		// write `pt` to the output sink here
		_ = pt
	}
	return nil
}

func init() {
	outputs.Add("simpleoutput", func() outputs.Output { return &Simple{} })
}
```
## Service Outputs
This section is for developers who want to create new "service" output. A
service output differs from a regular output in that it operates a background service
while Telegraf is running. One example would be the `prometheus_client` output,
which operates an HTTP server.
Their interface is quite similar to a regular output, with the addition of `Start()`
and `Stop()` methods.
### Service Output Guidelines
* Same as the `Output` guidelines, except that they must conform to the
`outputs.ServiceOutput` interface.
### Service Output interface
```go
type ServiceOutput interface {
	Connect() error
	Close() error
	Description() string
	SampleConfig() string
	Write(points []*client.Point) error
	Start() error
	Stop()
}
```
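For illustration, here is a sketch of the extra `Start()`/`Stop()` pair, loosely
modeled on how an HTTP-serving output like `prometheus_client` works; the port,
handler body, and type are invented:

```go
package simpleserviceoutput

import (
	"fmt"
	"net"
	"net/http"
)

type Simple struct {
	listener net.Listener
}

// Start launches the background HTTP server that runs while Telegraf runs.
func (s *Simple) Start() error {
	ln, err := net.Listen("tcp", ":9126") // invented port
	if err != nil {
		return err
	}
	s.listener = ln
	go http.Serve(ln, http.HandlerFunc(
		func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, "buffered metrics would be exposed here")
		}))
	return nil
}

// Stop closes the listener, which shuts the background server down.
func (s *Simple) Stop() {
	s.listener.Close()
}
```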
## Unit Tests
### Execute short tests
Execute `make test-short`.
### Execute long tests
As Telegraf collects metrics from several third-party services, mocking each
service is a difficult task: some of them have complicated protocols
that would take some time to replicate.
To overcome this situation we've decided to use docker containers to provide a
fast and reproducible environment to test those services which require it.
For other situations
(e.g. https://github.com/influxdb/telegraf/blob/master/plugins/redis/redis_test.go )
a simple mock will suffice.
To execute Telegraf tests follow these simple steps:
- Install docker following [these](https://docs.docker.com/installation/)
instructions
- Execute `make test`
**OSX users**: you will need to install `boot2docker` or `docker-machine`.
The Makefile will assume that you have a `docker-machine` box called `default` to
get the IP address.
### Unit test troubleshooting
Try cleaning up your test environment by executing `make docker-kill` and
re-running the tests.

238
Godeps/Godeps.json generated Normal file

@@ -0,0 +1,238 @@
{
"ImportPath": "github.com/influxdb/telegraf",
"GoVersion": "go1.5.1",
"Deps": [
{
"ImportPath": "bitbucket.org/ww/goautoneg",
"Comment": "null-5",
"Rev": "75cd24fc2f2c2a2088577d12123ddee5f54e0675"
},
{
"ImportPath": "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git",
"Comment": "v0.9.1-14-g546c47a",
"Rev": "546c47a6d0e9492e77f6f37473d59c36a708e08b"
},
{
"ImportPath": "github.com/Shopify/sarama",
"Comment": "v1.4.3-45-g5b18996",
"Rev": "5b18996ef1cd555a60562ae4c5d7843ae137e12d"
},
{
"ImportPath": "github.com/Sirupsen/logrus",
"Comment": "v0.8.6-7-g9c060de",
"Rev": "9c060de643590dae45da9d7c26276463bfc46fa0"
},
{
"ImportPath": "github.com/armon/go-metrics",
"Rev": "b2d95e5291cdbc26997d1301a5e467ecbb240e25"
},
{
"ImportPath": "github.com/beorn7/perks/quantile",
"Rev": "b965b613227fddccbfffe13eae360ed3fa822f8d"
},
{
"ImportPath": "github.com/boltdb/bolt",
"Comment": "v1.0-117-g0f053fa",
"Rev": "0f053fabc06119583d61937a0a06ef0ba0f1b301"
},
{
"ImportPath": "github.com/cenkalti/backoff",
"Rev": "4dc77674aceaabba2c7e3da25d4c823edfb73f99"
},
{
"ImportPath": "github.com/dancannon/gorethink/encoding",
"Comment": "v1.x.x-1-g786f12a",
"Rev": "786f12ae730ea93485c4eb2c44b3ede6e1e8745f"
},
{
"ImportPath": "github.com/dancannon/gorethink/ql2",
"Comment": "v1.x.x-1-g786f12a",
"Rev": "786f12ae730ea93485c4eb2c44b3ede6e1e8745f"
},
{
"ImportPath": "github.com/dancannon/gorethink/types",
"Comment": "v1.x.x-1-g786f12a",
"Rev": "786f12ae730ea93485c4eb2c44b3ede6e1e8745f"
},
{
"ImportPath": "github.com/eapache/go-resiliency/breaker",
"Comment": "v1.0.0-1-ged0319b",
"Rev": "ed0319b32e66e3295db52695ba3ee493e823fbfe"
},
{
"ImportPath": "github.com/eapache/queue",
"Comment": "v1.0.2",
"Rev": "ded5959c0d4e360646dc9e9908cff48666781367"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient",
"Rev": "ef410296f87750305e1e1acf9ad2ba3833dcb004"
},
{
"ImportPath": "github.com/go-sql-driver/mysql",
"Comment": "v1.2-118-g3dd7008",
"Rev": "3dd7008ac1529aca1bcd8a9db75228a71ba23cac"
},
{
"ImportPath": "github.com/gogo/protobuf/proto",
"Rev": "cabd153b69f71bab8b89fd667a2d9bb28c92ceb4"
},
{
"ImportPath": "github.com/golang/protobuf/proto",
"Rev": "73aaaa9eb61d74fbf7e256ca586a3a565b308eea"
},
{
"ImportPath": "github.com/golang/snappy",
"Rev": "723cc1e459b8eea2dea4583200fd60757d40097a"
},
{
"ImportPath": "github.com/gonuts/go-shellquote",
"Rev": "e842a11b24c6abfb3dd27af69a17f482e4b483c2"
},
{
"ImportPath": "github.com/hashicorp/go-msgpack/codec",
"Rev": "fa3f63826f7c23912c15263591e65d54d080b458"
},
{
"ImportPath": "github.com/hashicorp/raft",
"Rev": "9b586e29edf1ed085b11da7772479ee45c433996"
},
{
"ImportPath": "github.com/hashicorp/raft-boltdb",
"Rev": "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee"
},
{
"ImportPath": "github.com/influxdb/influxdb",
"Comment": "v0.9.4-rc1-922-gb0e9f7e",
"Rev": "b0e9f7e844225b05abf9f4455229490f99348ac4"
},
{
"ImportPath": "github.com/lib/pq",
"Comment": "go1.0-cutoff-59-gb269bd0",
"Rev": "b269bd035a727d6c1081f76e7a239a1b00674c40"
},
{
"ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
"Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a"
},
{
"ImportPath": "github.com/mreiferson/go-snappystream",
"Comment": "v0.2.3",
"Rev": "028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504"
},
{
"ImportPath": "github.com/naoina/go-stringutil",
"Rev": "360db0db4b01d34e12a2ec042c09e7d37fece761"
},
{
"ImportPath": "github.com/naoina/toml",
"Rev": "5811abcabb29d6af0fdf060f96d328962bd3cd5e"
},
{
"ImportPath": "github.com/nsqio/go-nsq",
"Comment": "v1.0.5-6-g2118015",
"Rev": "2118015c120962edc5d03325c680daf3163a8b5f"
},
{
"ImportPath": "github.com/pborman/uuid",
"Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4"
},
{
"ImportPath": "github.com/prometheus/client_golang/prometheus",
"Comment": "0.7.0-52-ge51041b",
"Rev": "e51041b3fa41cece0dca035740ba6411905be473"
},
{
"ImportPath": "github.com/prometheus/client_model/go",
"Comment": "model-0.0.2-12-gfa8ad6f",
"Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6"
},
{
"ImportPath": "github.com/prometheus/common/expfmt",
"Rev": "369ec0491ce7be15431bd4f23b7fa17308f94190"
},
{
"ImportPath": "github.com/prometheus/common/model",
"Rev": "369ec0491ce7be15431bd4f23b7fa17308f94190"
},
{
"ImportPath": "github.com/prometheus/procfs",
"Rev": "454a56f35412459b5e684fd5ec0f9211b94f002a"
},
{
"ImportPath": "github.com/samuel/go-zookeeper/zk",
"Rev": "5bb5cfc093ad18a28148c578f8632cfdb4d802e4"
},
{
"ImportPath": "github.com/shirou/gopsutil",
"Comment": "1.0.0-173-g1e9aabb",
"Rev": "1e9aabb3c8132314662698c9d1c0aef68d9da617"
},
{
"ImportPath": "github.com/streadway/amqp",
"Rev": "f4879ba28fffbb576743b03622a9ff20461826b2"
},
{
"ImportPath": "github.com/stretchr/objx",
"Rev": "cbeaeb16a013161a98496fad62933b1d21786672"
},
{
"ImportPath": "github.com/stretchr/testify/assert",
"Comment": "v1.0-21-gf552045",
"Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1"
},
{
"ImportPath": "github.com/stretchr/testify/mock",
"Comment": "v1.0-21-gf552045",
"Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1"
},
{
"ImportPath": "github.com/stretchr/testify/require",
"Comment": "v1.0-21-gf552045",
"Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1"
},
{
"ImportPath": "github.com/stretchr/testify/suite",
"Comment": "v1.0-21-gf552045",
"Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1"
},
{
"ImportPath": "github.com/wvanbergen/kafka/consumergroup",
"Rev": "b0e5c20a0d7c3ccfd37a5965ae30a3a0fd15945d"
},
{
"ImportPath": "github.com/wvanbergen/kazoo-go",
"Rev": "02a3868e9b87153285439cd27a39c0a2984a13af"
},
{
"ImportPath": "golang.org/x/crypto/bcrypt",
"Rev": "173ce04bfaf66c7bb0fa9d5c0bfd93e773909dbd"
},
{
"ImportPath": "golang.org/x/crypto/blowfish",
"Rev": "173ce04bfaf66c7bb0fa9d5c0bfd93e773909dbd"
},
{
"ImportPath": "golang.org/x/net/websocket",
"Rev": "db8e4de5b2d6653f66aea53094624468caad15d2"
},
{
"ImportPath": "gopkg.in/dancannon/gorethink.v1",
"Comment": "v1.x.x",
"Rev": "8aca6ba2cc6e873299617d730fac0d7f6593113a"
},
{
"ImportPath": "gopkg.in/fatih/pool.v2",
"Rev": "cba550ebf9bce999a02e963296d4bc7a486cb715"
},
{
"ImportPath": "gopkg.in/mgo.v2",
"Comment": "r2015.06.03-3-g3569c88",
"Rev": "3569c88678d88179dcbd68d02ab081cbca3cd4d0"
},
{
"ImportPath": "gopkg.in/yaml.v2",
"Rev": "7ad95dd0798a40da1ccdff6dff35fd177b5edf40"
}
]
}

5
Godeps/Readme generated Normal file

@@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

2
Godeps/_workspace/.gitignore generated vendored Normal file

@@ -0,0 +1,2 @@
/pkg
/bin


@@ -0,0 +1,13 @@
include $(GOROOT)/src/Make.inc

TARG=bitbucket.org/ww/goautoneg
GOFILES=autoneg.go

include $(GOROOT)/src/Make.pkg

format:
	gofmt -w *.go

docs:
	gomake clean
	godoc ${TARG} > README.txt


@@ -0,0 +1,67 @@
PACKAGE
package goautoneg
import "bitbucket.org/ww/goautoneg"
HTTP Content-Type Autonegotiation.
The functions in this package implement the behaviour specified in
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
Neither the name of the Open Knowledge Foundation Ltd. nor the
names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FUNCTIONS
func Negotiate(header string, alternatives []string) (content_type string)
Negotiate the most appropriate content_type given the accept header
and a list of alternatives.
func ParseAccept(header string) (accept []Accept)
Parse an Accept Header string returning a sorted list
of clauses
TYPES
type Accept struct {
Type, SubType string
Q float32
Params map[string]string
}
Structure to represent a clause in an HTTP Accept Header
SUBDIRECTORIES
.hg
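
A brief usage sketch of the two exported functions (this example is ours, not
part of the vendored package):

```go
package main

import (
	"fmt"

	"bitbucket.org/ww/goautoneg"
)

func main() {
	header := "text/html;q=0.9, application/json"

	// Pick the best match among the content types we can serve; the
	// default q of 1.0 makes application/json outrank text/html here.
	fmt.Println(goautoneg.Negotiate(header, []string{"application/json", "text/html"}))

	// Inspect the parsed, Q-sorted clauses.
	for _, clause := range goautoneg.ParseAccept(header) {
		fmt.Printf("%s/%s q=%g\n", clause.Type, clause.SubType, clause.Q)
	}
}
```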


@@ -0,0 +1,162 @@
/*
HTTP Content-Type Autonegotiation.
The functions in this package implement the behaviour specified in
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
Neither the name of the Open Knowledge Foundation Ltd. nor the
names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package goautoneg
import (
	"sort"
	"strconv"
	"strings"
)

// Structure to represent a clause in an HTTP Accept Header
type Accept struct {
	Type, SubType string
	Q             float64
	Params        map[string]string
}

// For internal use, so that we can use the sort interface
type accept_slice []Accept

func (accept accept_slice) Len() int {
	slice := []Accept(accept)
	return len(slice)
}

func (accept accept_slice) Less(i, j int) bool {
	slice := []Accept(accept)
	ai, aj := slice[i], slice[j]
	if ai.Q > aj.Q {
		return true
	}
	if ai.Type != "*" && aj.Type == "*" {
		return true
	}
	if ai.SubType != "*" && aj.SubType == "*" {
		return true
	}
	return false
}

func (accept accept_slice) Swap(i, j int) {
	slice := []Accept(accept)
	slice[i], slice[j] = slice[j], slice[i]
}

// Parse an Accept Header string returning a sorted list
// of clauses
func ParseAccept(header string) (accept []Accept) {
	parts := strings.Split(header, ",")
	accept = make([]Accept, 0, len(parts))
	for _, part := range parts {
		part := strings.Trim(part, " ")

		a := Accept{}
		a.Params = make(map[string]string)
		a.Q = 1.0

		mrp := strings.Split(part, ";")

		media_range := mrp[0]
		sp := strings.Split(media_range, "/")
		a.Type = strings.Trim(sp[0], " ")

		switch {
		case len(sp) == 1 && a.Type == "*":
			a.SubType = "*"
		case len(sp) == 2:
			a.SubType = strings.Trim(sp[1], " ")
		default:
			continue
		}

		if len(mrp) == 1 {
			accept = append(accept, a)
			continue
		}

		for _, param := range mrp[1:] {
			sp := strings.SplitN(param, "=", 2)
			if len(sp) != 2 {
				continue
			}
			token := strings.Trim(sp[0], " ")
			if token == "q" {
				a.Q, _ = strconv.ParseFloat(sp[1], 32)
			} else {
				a.Params[token] = strings.Trim(sp[1], " ")
			}
		}

		accept = append(accept, a)
	}

	slice := accept_slice(accept)
	sort.Sort(slice)

	return
}

// Negotiate the most appropriate content_type given the accept header
// and a list of alternatives.
func Negotiate(header string, alternatives []string) (content_type string) {
	asp := make([][]string, 0, len(alternatives))
	for _, ctype := range alternatives {
		asp = append(asp, strings.SplitN(ctype, "/", 2))
	}
	for _, clause := range ParseAccept(header) {
		for i, ctsp := range asp {
			if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
				content_type = alternatives[i]
				return
			}
			if clause.Type == ctsp[0] && clause.SubType == "*" {
				content_type = alternatives[i]
				return
			}
			if clause.Type == "*" && clause.SubType == "*" {
				content_type = alternatives[i]
				return
			}
		}
	}
	return
}


@@ -0,0 +1,36 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.msg
*.lok
samples/trivial
samples/trivial2
samples/sample
samples/reconnect
samples/ssl
samples/custom_store
samples/simple
samples/stdinpub
samples/stdoutsub
samples/routing


@@ -0,0 +1,69 @@
Contributing to Paho
====================
Thanks for your interest in this project.
Project description:
--------------------
The Paho project has been created to provide scalable open-source implementations of open and standard messaging protocols aimed at new, existing, and emerging applications for Machine-to-Machine (M2M) and Internet of Things (IoT).
Paho reflects the inherent physical and cost constraints of device connectivity. Its objectives include effective levels of decoupling between devices and applications, designed to keep markets open and encourage the rapid growth of scalable Web and Enterprise middleware and applications. Paho is being kicked off with MQTT publish/subscribe client implementations for use on embedded platforms, along with corresponding server support as determined by the community.
- https://projects.eclipse.org/projects/technology.paho
Developer resources:
--------------------
Information regarding source code management, builds, coding standards, and more.
- https://projects.eclipse.org/projects/technology.paho/developer
Contributor License Agreement:
------------------------------
Before your contribution can be accepted by the project, you need to create and electronically sign the Eclipse Foundation Contributor License Agreement (CLA).
- http://www.eclipse.org/legal/CLA.php
Contributing Code:
------------------
The Go client uses git with Gerrit for code review, use the following URLs for Gerrit access;
ssh://<username>@git.eclipse.org:29418/paho/org.eclipse.paho.mqtt.golang
Configure a remote called review to push your changes to;
git config remote.review.url ssh://<username>@git.eclipse.org:29418/paho/org.eclipse.paho.mqtt.golang
git config remote.review.push HEAD:refs/for/<branch>
When you have made and committed a change you can push it to Gerrit for review with;
git push review
See https://wiki.eclipse.org/Gerrit for more details on how Gerrit is used in Eclipse, https://wiki.eclipse.org/Gerrit#Gerrit_Code_Review_Cheatsheet has some particularly useful information.
Git commit messages should follow the style described here;
http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
Contact:
--------
Contact the project developers via the project's "dev" list.
- https://dev.eclipse.org/mailman/listinfo/paho-dev
Search for bugs:
----------------
This project uses Bugzilla to track ongoing development and issues.
- https://bugs.eclipse.org/bugs/buglist.cgi?product=Paho&component=MQTT-Go
Create a new bug:
-----------------
Be sure to search for existing bugs before you create another one. Remember that contributions are always welcome!
- https://bugs.eclipse.org/bugs/enter_bug.cgi?product=Paho


@@ -0,0 +1,15 @@
Eclipse Distribution License - v 1.0
Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -0,0 +1,87 @@
Eclipse Public License - v 1.0
THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
1. DEFINITIONS
"Contribution" means:
a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and
b) in the case of each subsequent Contributor:
i) changes to the Program, and
ii) additions to the Program;
where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program.
"Contributor" means any person or entity that distributes the Program.
"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program.
"Program" means the Contributions distributed in accordance with this Agreement.
"Recipient" means anyone who receives the Program under this Agreement, including all Contributors.
2. GRANT OF RIGHTS
a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form.
b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder.
c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program.
d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement.
3. REQUIREMENTS
A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that:
a) it complies with the terms and conditions of this Agreement; and
b) its license agreement:
i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose;
ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits;
iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and
iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange.
When the Program is made available in source code form:
a) it must be made available under this Agreement; and
b) a copy of this Agreement must be included with each copy of the Program.
Contributors may not remove or alter any copyright notices contained within the Program.
Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution.
4. COMMERCIAL DISTRIBUTION
Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense.
For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages.
5. NO WARRANTY
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations.
6. DISCLAIMER OF LIABILITY
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
7. GENERAL
If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.
If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed.
All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive.
Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved.
This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation.


@@ -0,0 +1,62 @@
Eclipse Paho MQTT Go client
===========================
This repository contains the source code for the [Eclipse Paho](http://eclipse.org/paho) MQTT Go client library.
This code builds a library which enables applications to connect to an [MQTT](http://mqtt.org) broker to publish messages, and to subscribe to topics and receive published messages.
This library supports a fully asynchronous mode of operation.
Installation and Build
----------------------
This client is designed to work with the standard Go tools, so installation is as easy as:
```
go get git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git
```
The client depends on Google's [websockets](http://godoc.org/code.google.com/p/go.net/websocket) package,
also easily installed with the command:
```
go get code.google.com/p/go.net/websocket
```
Usage and API
-------------
Detailed API documentation is available by using the godoc tool, or can be browsed online
using the [godoc.org](http://godoc.org/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git) service.
Make use of the library by importing it in your Go client source code. For example,
```
import MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
```
Samples are available in the `/samples` directory for reference.
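A minimal connect-and-publish sketch for orientation (illustrative only: the `NewClientOptions` and `AddBroker` helpers and the `Wait`/`Error` methods on `Token` are assumed from the options and token APIs, and a broker is assumed at `tcp://localhost:1883`):
```
opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883")
c := MQTT.NewClient(opts)
if token := c.Connect(); token.Wait() && token.Error() != nil {
	panic(token.Error())
}
c.Publish("go-mqtt/sample", 0, false, "Hello, World")
c.Disconnect(250)
```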
Runtime tracing
---------------
Tracing is enabled by assigning loggers (from the Go log package) to the logging endpoints ERROR, CRITICAL, WARN and DEBUG.
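For example, a sketch assuming the endpoints are assignable package-level `*log.Logger` variables (as the sentence above suggests):
```
MQTT.DEBUG = log.New(os.Stdout, "", 0)
MQTT.ERROR = log.New(os.Stderr, "", 0)
```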
Reporting bugs
--------------
Please report bugs under the "MQTT-Go" Component in [Eclipse Bugzilla](http://bugs.eclipse.org/bugs/) for the Paho Technology project. This is a very new library as of Q1 2014, so there are sure to be bugs.
More information
----------------
Discussion of the Paho clients takes place on the [Eclipse paho-dev mailing list](https://dev.eclipse.org/mailman/listinfo/paho-dev).
General questions about the MQTT protocol are discussed in the [MQTT Google Group](https://groups.google.com/forum/?hl=en-US&fromgroups#!forum/mqtt).
There is much more information available via the [MQTT community site](http://mqtt.org).

View File

@@ -0,0 +1,41 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"><head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>About</title>
</head>
<body lang="EN-US">
<h2>About This Content</h2>
<p><em>December 9, 2013</em></p>
<h3>License</h3>
<p>The Eclipse Foundation makes available all content in this plug-in ("Content"). Unless otherwise
indicated below, the Content is provided to you under the terms and conditions of the
Eclipse Public License Version 1.0 ("EPL") and Eclipse Distribution License Version 1.0 ("EDL").
A copy of the EPL is available at
<a href="http://www.eclipse.org/legal/epl-v10.html">http://www.eclipse.org/legal/epl-v10.html</a>
and a copy of the EDL is available at
<a href="http://www.eclipse.org/org/documents/edl-v10.php">http://www.eclipse.org/org/documents/edl-v10.php</a>.
For purposes of the EPL, "Program" will mean the Content.</p>
<p>If you did not receive this Content directly from the Eclipse Foundation, the Content is
being redistributed by another party ("Redistributor") and different terms and conditions may
apply to your use of any object code in the Content. Check the Redistributor's license that was
provided with the Content. If no such license exists, contact the Redistributor. Unless otherwise
indicated below, the terms and conditions of the EPL still apply to any source code in the Content
and such source code may be obtained at <a href="http://www.eclipse.org/">http://www.eclipse.org</a>.</p>
<h3>Third Party Content</h3>
<p>The Content includes items that have been sourced from third parties as set out below. If you
did not receive this Content directly from the Eclipse Foundation, the following is provided
for informational purposes only, and you should look to the Redistributor's license for
terms and conditions of use.</p>
<p><em>
<strong>None</strong> <br><br>
<br><br>
</em></p>
</body></html>

View File

@@ -0,0 +1,517 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
// Package mqtt provides an MQTT v3.1.1 client library.
package mqtt
import (
"errors"
"fmt"
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
"net"
"sync"
"time"
)
// ClientInt is the interface definition for a Client as used by this
// library; it exists primarily to allow mocking in tests.
type ClientInt interface {
IsConnected() bool
Connect() Token
Disconnect(uint)
disconnect()
Publish(string, byte, bool, interface{}) Token
Subscribe(string, byte, MessageHandler) Token
SubscribeMultiple(map[string]byte, MessageHandler) Token
Unsubscribe(...string) Token
}
// Client is an MQTT v3.1.1 client for communicating
// with an MQTT server using non-blocking methods that allow work
// to be done in the background.
// An application may connect to an MQTT server using:
// A plain TCP socket
// A secure SSL/TLS socket
// A websocket
// To enable ensured message delivery at Quality of Service (QoS) levels
// described in the MQTT spec, a message persistence mechanism must be
// used. This is done by providing a type which implements the Store
// interface. For convenience, FileStore and MemoryStore are provided
// implementations that should be sufficient for most use cases. More
// information can be found in their respective documentation.
// Numerous connection options may be specified by configuring
// and then supplying a ClientOptions type.
type Client struct {
sync.RWMutex
messageIds
conn net.Conn
ibound chan packets.ControlPacket
obound chan *PacketAndToken
oboundP chan *PacketAndToken
msgRouter *router
stopRouter chan bool
incomingPubChan chan *packets.PublishPacket
errors chan error
stop chan struct{}
persist Store
options ClientOptions
lastContact lastcontact
pingOutstanding bool
connected bool
workers sync.WaitGroup
}
// NewClient will create an MQTT v3.1.1 client with all of the options specified
// in the provided ClientOptions. The client must have the Connect method called
// on it before it may be used. This is to make sure resources (such as a net
// connection) are created before the application is actually ready.
func NewClient(o *ClientOptions) *Client {
c := &Client{}
c.options = *o
if c.options.Store == nil {
c.options.Store = NewMemoryStore()
}
switch c.options.ProtocolVersion {
case 3, 4:
c.options.protocolVersionExplicit = true
default:
c.options.ProtocolVersion = 4
c.options.protocolVersionExplicit = false
}
c.persist = c.options.Store
c.connected = false
c.messageIds = messageIds{index: make(map[uint16]Token)}
c.msgRouter, c.stopRouter = newRouter()
c.msgRouter.setDefaultHandler(c.options.DefaultPublishHander)
return c
}
// IsConnected returns a bool signifying whether
// the client is connected or not.
func (c *Client) IsConnected() bool {
c.RLock()
defer c.RUnlock()
return c.connected
}
func (c *Client) setConnected(status bool) {
c.Lock()
defer c.Unlock()
c.connected = status
}
//ErrNotConnected is the error returned from function calls that are
//made when the client is not connected to a broker
var ErrNotConnected = errors.New("Not Connected")
// Connect will create a connection to the message broker and return a
// Token that may be used to track the attempt.
// If clean session is true, then any existing client
// state will be removed.
func (c *Client) Connect() Token {
var err error
t := newToken(packets.Connect).(*ConnectToken)
DEBUG.Println(CLI, "Connect()")
go func() {
var rc byte
cm := newConnectMsgFromOptions(&c.options)
for _, broker := range c.options.Servers {
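// Try each configured broker in turn. If the CONNACK is a refusal and
// the protocol version was not pinned explicitly, fall back from MQTT
// 3.1.1 to 3.1 and retry the same broker (see the goto CONN below).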
CONN:
DEBUG.Println(CLI, "about to write new connect msg")
c.conn, err = openConnection(broker, &c.options.TLSConfig, c.options.ConnectTimeout)
if err == nil {
DEBUG.Println(CLI, "socket connected to broker")
switch c.options.ProtocolVersion {
case 3:
DEBUG.Println(CLI, "Using MQTT 3.1 protocol")
cm.ProtocolName = "MQIsdp"
cm.ProtocolVersion = 3
default:
DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol")
c.options.ProtocolVersion = 4
cm.ProtocolName = "MQTT"
cm.ProtocolVersion = 4
}
cm.Write(c.conn)
rc = c.connect()
if rc != packets.Accepted {
c.conn.Close()
c.conn = nil
//if the protocol version was explicitly set don't do any fallback
if c.options.protocolVersionExplicit {
ERROR.Println(CLI, "Connecting to", broker, "CONNACK was not CONN_ACCEPTED, but rather", packets.ConnackReturnCodes[rc])
continue
}
if c.options.ProtocolVersion == 4 {
DEBUG.Println(CLI, "Trying reconnect using MQTT 3.1 protocol")
c.options.ProtocolVersion = 3
goto CONN
}
}
break
} else {
ERROR.Println(CLI, err.Error())
WARN.Println(CLI, "failed to connect to broker, trying next")
rc = packets.ErrNetworkError
}
}
if c.conn == nil {
ERROR.Println(CLI, "Failed to connect to a broker")
t.returnCode = rc
if rc != packets.ErrNetworkError {
t.err = packets.ConnErrors[rc]
} else {
t.err = fmt.Errorf("%s : %s", packets.ConnErrors[rc], err)
}
t.flowComplete()
return
}
c.lastContact.update()
c.persist.Open()
c.obound = make(chan *PacketAndToken, 100)
c.oboundP = make(chan *PacketAndToken, 100)
c.ibound = make(chan packets.ControlPacket)
c.errors = make(chan error)
c.stop = make(chan struct{})
c.incomingPubChan = make(chan *packets.PublishPacket, 100)
c.msgRouter.matchAndDispatch(c.incomingPubChan, c.options.Order, c)
c.workers.Add(1)
go outgoing(c)
go alllogic(c)
c.connected = true
DEBUG.Println(CLI, "client is connected")
if c.options.OnConnect != nil {
go c.options.OnConnect(c)
}
if c.options.KeepAlive != 0 {
c.workers.Add(1)
go keepalive(c)
}
// Take care of any messages in the store
//var leftovers []Receipt
if c.options.CleanSession == false {
//leftovers = c.resume()
} else {
c.persist.Reset()
}
// Do not start incoming until resume has completed
c.workers.Add(1)
go incoming(c)
DEBUG.Println(CLI, "exit startClient")
t.flowComplete()
}()
return t
}
// internal function used to reconnect the client when it loses its connection
func (c *Client) reconnect() {
DEBUG.Println(CLI, "enter reconnect")
var rc byte = 1
var sleep uint = 1
var err error
for rc != 0 {
cm := newConnectMsgFromOptions(&c.options)
for _, broker := range c.options.Servers {
CONN:
DEBUG.Println(CLI, "about to write new connect msg")
c.conn, err = openConnection(broker, &c.options.TLSConfig, c.options.ConnectTimeout)
if err == nil {
DEBUG.Println(CLI, "socket connected to broker")
switch c.options.ProtocolVersion {
case 3:
DEBUG.Println(CLI, "Using MQTT 3.1 protocol")
cm.ProtocolName = "MQIsdp"
cm.ProtocolVersion = 3
default:
DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol")
c.options.ProtocolVersion = 4
cm.ProtocolName = "MQTT"
cm.ProtocolVersion = 4
}
cm.Write(c.conn)
rc = c.connect()
if rc != packets.Accepted {
c.conn.Close()
c.conn = nil
//if the protocol version was explicitly set don't do any fallback
if c.options.protocolVersionExplicit {
ERROR.Println(CLI, "Connecting to", broker, "CONNACK was not Accepted, but rather", packets.ConnackReturnCodes[rc])
continue
}
if c.options.ProtocolVersion == 4 {
DEBUG.Println(CLI, "Trying reconnect using MQTT 3.1 protocol")
c.options.ProtocolVersion = 3
goto CONN
}
}
break
} else {
ERROR.Println(CLI, err.Error())
WARN.Println(CLI, "failed to connect to broker, trying next")
rc = packets.ErrNetworkError
}
}
if rc != 0 {
DEBUG.Println(CLI, "Reconnect failed, sleeping for", sleep, "seconds")
time.Sleep(time.Duration(sleep) * time.Second)
if sleep <= uint(c.options.MaxReconnectInterval.Seconds()) {
sleep *= 2
}
}
}
c.lastContact.update()
c.stop = make(chan struct{})
c.workers.Add(1)
go outgoing(c)
go alllogic(c)
c.setConnected(true)
DEBUG.Println(CLI, "client is reconnected")
if c.options.OnConnect != nil {
go c.options.OnConnect(c)
}
if c.options.KeepAlive != 0 {
c.workers.Add(1)
go keepalive(c)
}
c.workers.Add(1)
go incoming(c)
}
// This function is only used for receiving a connack
// when the connection is first started.
// This prevents receiving incoming data while resume
// is in progress if clean session is false.
func (c *Client) connect() byte {
DEBUG.Println(NET, "connect started")
ca, err := packets.ReadPacket(c.conn)
if err != nil {
ERROR.Println(NET, "connect got error", err)
//c.errors <- err
return packets.ErrNetworkError
}
// use a comma-ok assertion so a non-CONNACK packet cannot cause a panic
msg, ok := ca.(*packets.ConnackPacket)
if !ok || msg == nil {
ERROR.Println(NET, "received msg that was nil or not CONNACK")
return packets.ErrNetworkError
}
DEBUG.Println(NET, "received connack")
return msg.ReturnCode
}
// Disconnect will end the connection with the server, but not before waiting
// the specified number of milliseconds for existing work to be
// completed.
func (c *Client) Disconnect(quiesce uint) {
if !c.IsConnected() {
WARN.Println(CLI, "already disconnected")
return
}
DEBUG.Println(CLI, "disconnecting")
c.setConnected(false)
dm := packets.NewControlPacket(packets.Disconnect).(*packets.DisconnectPacket)
dt := newToken(packets.Disconnect)
c.oboundP <- &PacketAndToken{p: dm, t: dt}
// wait for work to finish, or quiesce time consumed
dt.WaitTimeout(time.Duration(quiesce) * time.Millisecond)
c.disconnect()
}
// forceDisconnect will end the connection with the mqtt broker immediately.
func (c *Client) forceDisconnect() {
if !c.IsConnected() {
WARN.Println(CLI, "already disconnected")
return
}
c.setConnected(false)
c.conn.Close()
DEBUG.Println(CLI, "forcefully disconnecting")
c.disconnect()
}
func (c *Client) internalConnLost(err error) {
close(c.stop)
c.conn.Close()
c.workers.Wait()
if c.IsConnected() {
if c.options.OnConnectionLost != nil {
go c.options.OnConnectionLost(c, err)
}
if c.options.AutoReconnect {
go c.reconnect()
} else {
c.setConnected(false)
}
}
}
func (c *Client) disconnect() {
select {
case <-c.stop:
//someone else has already closed the channel, must be error
default:
close(c.stop)
}
c.conn.Close()
c.workers.Wait()
close(c.stopRouter)
DEBUG.Println(CLI, "disconnected")
c.persist.Close()
}
// Publish will publish a message with the specified QoS
// and content to the specified topic.
// A Token is returned that may be used to track
// delivery of the message.
func (c *Client) Publish(topic string, qos byte, retained bool, payload interface{}) Token {
token := newToken(packets.Publish).(*PublishToken)
DEBUG.Println(CLI, "enter Publish")
if !c.IsConnected() {
token.err = ErrNotConnected
token.flowComplete()
return token
}
pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
pub.Qos = qos
pub.TopicName = topic
pub.Retain = retained
switch payload.(type) {
case string:
pub.Payload = []byte(payload.(string))
case []byte:
pub.Payload = payload.([]byte)
default:
token.err = errors.New("Unknown payload type")
token.flowComplete()
return token
}
DEBUG.Println(CLI, "sending publish message, topic:", topic)
c.obound <- &PacketAndToken{p: pub, t: token}
return token
}
// Subscribe starts a new subscription. Provide a MessageHandler to be executed when
// a message is published on the topic provided.
func (c *Client) Subscribe(topic string, qos byte, callback MessageHandler) Token {
token := newToken(packets.Subscribe).(*SubscribeToken)
DEBUG.Println(CLI, "enter Subscribe")
if !c.IsConnected() {
token.err = ErrNotConnected
token.flowComplete()
return token
}
sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket)
if err := validateTopicAndQos(topic, qos); err != nil {
token.err = err
return token
}
sub.Topics = append(sub.Topics, topic)
sub.Qoss = append(sub.Qoss, qos)
DEBUG.Println(sub.String())
if callback != nil {
c.msgRouter.addRoute(topic, callback)
}
token.subs = append(token.subs, topic)
c.oboundP <- &PacketAndToken{p: sub, t: token}
DEBUG.Println(CLI, "exit Subscribe")
return token
}
// SubscribeMultiple starts a new subscription for multiple topics. Provide a MessageHandler to
// be executed when a message is published on one of the topics provided.
func (c *Client) SubscribeMultiple(filters map[string]byte, callback MessageHandler) Token {
var err error
token := newToken(packets.Subscribe).(*SubscribeToken)
DEBUG.Println(CLI, "enter SubscribeMultiple")
if !c.IsConnected() {
token.err = ErrNotConnected
token.flowComplete()
return token
}
sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket)
if sub.Topics, sub.Qoss, err = validateSubscribeMap(filters); err != nil {
token.err = err
return token
}
if callback != nil {
for topic := range filters {
c.msgRouter.addRoute(topic, callback)
}
}
token.subs = make([]string, len(sub.Topics))
copy(token.subs, sub.Topics)
c.oboundP <- &PacketAndToken{p: sub, t: token}
DEBUG.Println(CLI, "exit SubscribeMultiple")
return token
}
// Unsubscribe will end the subscription from each of the topics provided.
// Messages published to those topics from other clients will no longer be
// received.
func (c *Client) Unsubscribe(topics ...string) Token {
token := newToken(packets.Unsubscribe).(*UnsubscribeToken)
DEBUG.Println(CLI, "enter Unsubscribe")
if !c.IsConnected() {
token.err = ErrNotConnected
token.flowComplete()
return token
}
unsub := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket)
unsub.Topics = make([]string, len(topics))
copy(unsub.Topics, topics)
c.oboundP <- &PacketAndToken{p: unsub, t: token}
for _, topic := range topics {
c.msgRouter.deleteRoute(topic)
}
DEBUG.Println(CLI, "exit Unsubscribe")
return token
}
//DefaultConnectionLostHandler is a definition of a function that simply
//reports to the DEBUG log the reason for the client losing a connection.
func DefaultConnectionLostHandler(client *Client, reason error) {
DEBUG.Println("Connection lost:", reason.Error())
}

View File

@@ -0,0 +1,31 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt
type component string
// Component names for debug output
const (
NET component = "[net] "
PNG component = "[pinger] "
CLI component = "[client] "
DEC component = "[decode] "
MES component = "[message] "
STR component = "[store] "
MID component = "[msgids] "
TST component = "[test] "
STA component = "[state] "
ERR component = "[error] "
)

View File

@@ -0,0 +1,15 @@
Eclipse Distribution License - v 1.0
Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -0,0 +1,70 @@
Eclipse Public License - v 1.0
THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
1. DEFINITIONS
"Contribution" means:
a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and
b) in the case of each subsequent Contributor:
i) changes to the Program, and
ii) additions to the Program;
where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program.
"Contributor" means any person or entity that distributes the Program.
"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program.
"Program" means the Contributions distributed in accordance with this Agreement.
"Recipient" means anyone who receives the Program under this Agreement, including all Contributors.
2. GRANT OF RIGHTS
a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form.
b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder.
c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program.
d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement.
3. REQUIREMENTS
A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that:
a) it complies with the terms and conditions of this Agreement; and
b) its license agreement:
i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose;
ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits;
iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and
iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange.
When the Program is made available in source code form:
a) it must be made available under this Agreement; and
b) a copy of this Agreement must be included with each copy of the Program.
Contributors may not remove or alter any copyright notices contained within the Program.
Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution.
4. COMMERCIAL DISTRIBUTION
Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense.
For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages.
5. NO WARRANTY
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations.
6. DISCLAIMER OF LIABILITY
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
7. GENERAL
If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.
If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed.
All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive.
Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved.
This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation.

View File

@@ -0,0 +1,258 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt
import (
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
"io"
"io/ioutil"
"os"
"path"
"sync"
)
const (
msgExt = ".msg"
bkpExt = ".bkp"
)
// FileStore implements the store interface using the filesystem to provide
// true persistence, even across client failure. This is designed to use a
// single directory per running client. If you are running multiple clients
// on the same filesystem, you will need to be careful to specify unique
// store directories for each.
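// A hypothetical wiring sketch (a SetStore helper is assumed to exist on
// ClientOptions in this package's options file):
//
//	store := NewFileStore("/tmp/my-client-store")
//	opts := NewClientOptions().SetStore(store)
//	c := NewClient(opts)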
type FileStore struct {
sync.RWMutex
directory string
opened bool
}
// NewFileStore will create a new FileStore which stores its messages in the
// directory provided.
func NewFileStore(directory string) *FileStore {
store := &FileStore{
directory: directory,
opened: false,
}
return store
}
// Open will allow the FileStore to be used.
func (store *FileStore) Open() {
store.Lock()
defer store.Unlock()
// if no store directory was specified in ClientOpts, by default use the
// current working directory
if store.directory == "" {
store.directory, _ = os.Getwd()
}
// if store dir exists, great, otherwise, create it
if !exists(store.directory) {
perms := os.FileMode(0770)
merr := os.MkdirAll(store.directory, perms)
chkerr(merr)
}
store.opened = true
DEBUG.Println(STR, "store is opened at", store.directory)
}
// Close will disallow the FileStore from being used.
func (store *FileStore) Close() {
store.Lock()
defer store.Unlock()
store.opened = false
WARN.Println(STR, "store is not open")
}
// Put will put a message into the store, associated with the provided
// key value.
func (store *FileStore) Put(key string, m packets.ControlPacket) {
store.Lock()
defer store.Unlock()
chkcond(store.opened)
full := fullpath(store.directory, key)
if exists(full) {
backup(store.directory, key) // make a copy of what already exists
defer unbackup(store.directory, key)
}
write(store.directory, key, m)
chkcond(exists(full))
}
// Get will retrieve a message from the store, the one associated with
// the provided key value.
func (store *FileStore) Get(key string) packets.ControlPacket {
store.RLock()
defer store.RUnlock()
chkcond(store.opened)
filepath := fullpath(store.directory, key)
if !exists(filepath) {
return nil
}
mfile, oerr := os.Open(filepath)
chkerr(oerr)
//all, rerr := ioutil.ReadAll(mfile)
//chkerr(rerr)
msg, rerr := packets.ReadPacket(mfile)
chkerr(rerr)
cerr := mfile.Close()
chkerr(cerr)
return msg
}
// All will provide a list of all of the keys associated with messages
// currently residing in the FileStore.
func (store *FileStore) All() []string {
store.RLock()
defer store.RUnlock()
return store.all()
}
// Del will remove the persisted message associated with the provided
// key from the FileStore.
func (store *FileStore) Del(key string) {
store.Lock()
defer store.Unlock()
store.del(key)
}
// Reset will remove all persisted messages from the FileStore.
func (store *FileStore) Reset() {
store.Lock()
defer store.Unlock()
WARN.Println(STR, "FileStore Reset")
for _, key := range store.all() {
store.del(key)
}
}
// lockless
func (store *FileStore) all() []string {
chkcond(store.opened)
keys := []string{}
files, rderr := ioutil.ReadDir(store.directory)
chkerr(rderr)
for _, f := range files {
DEBUG.Println(STR, "file in All():", f.Name())
key := f.Name()[0 : len(f.Name())-4] // remove file extension
keys = append(keys, key)
}
return keys
}
// lockless
func (store *FileStore) del(key string) {
chkcond(store.opened)
DEBUG.Println(STR, "store del filepath:", store.directory)
DEBUG.Println(STR, "store delete key:", key)
filepath := fullpath(store.directory, key)
DEBUG.Println(STR, "path of deletion:", filepath)
if !exists(filepath) {
WARN.Println(STR, "store could not delete key:", key)
return
}
rerr := os.Remove(filepath)
chkerr(rerr)
DEBUG.Println(STR, "del msg:", key)
chkcond(!exists(filepath))
}
func fullpath(store string, key string) string {
p := path.Join(store, key+msgExt)
return p
}
func bkppath(store string, key string) string {
p := path.Join(store, key+bkpExt)
return p
}
// create file called "X.[messageid].msg" located in the store
// the contents of the file is the bytes of the message
// if a message with m's message id already exists, it will
// be overwritten
// X will be 'i' for inbound messages, and 'o' for outbound messages
func write(store, key string, m packets.ControlPacket) {
filepath := fullpath(store, key)
f, err := os.Create(filepath)
chkerr(err)
werr := m.Write(f)
chkerr(werr)
cerr := f.Close()
chkerr(cerr)
}
func exists(file string) bool {
if _, err := os.Stat(file); err != nil {
if os.IsNotExist(err) {
return false
}
chkerr(err)
}
return true
}
func backup(store, key string) {
bkpp := bkppath(store, key)
fulp := fullpath(store, key)
backup, err := os.Create(bkpp)
chkerr(err)
mfile, oerr := os.Open(fulp)
chkerr(oerr)
_, cerr := io.Copy(backup, mfile)
chkerr(cerr)
clberr := backup.Close()
chkerr(clberr)
clmerr := mfile.Close()
chkerr(clmerr)
}
// Identify .bkp files in the store and turn them into .msg files,
// whether or not it overwrites an existing file. This is safe because
// I'm copying the Paho Java client and they say it is.
func restore(store string) {
files, rderr := ioutil.ReadDir(store)
chkerr(rderr)
for _, f := range files {
fname := f.Name()
if len(fname) > 4 {
if fname[len(fname)-4:] == bkpExt {
key := fname[0 : len(fname)-4]
fulp := fullpath(store, key)
msg, cerr := os.Create(fulp)
chkerr(cerr)
bkpp := path.Join(store, fname)
bkp, oerr := os.Open(bkpp)
chkerr(oerr)
n, cerr := io.Copy(msg, bkp)
chkerr(cerr)
chkcond(n > 0)
clmerr := msg.Close()
chkerr(clmerr)
clberr := bkp.Close()
chkerr(clberr)
remerr := os.Remove(bkpp)
chkerr(remerr)
}
}
}
}
func unbackup(store, key string) {
bkpp := bkppath(store, key)
remerr := os.Remove(bkpp)
chkerr(remerr)
}

View File

@@ -0,0 +1,74 @@
FVT Instructions
================
The FVT tests are currently only supported by [IBM MessageSight](http://www-03.ibm.com/software/products/us/en/messagesight/).
Support for [mosquitto](http://mosquitto.org/) and [IBM Really Small Message Broker](https://www.ibm.com/developerworks/community/groups/service/html/communityview?communityUuid=d5bedadd-e46f-4c97-af89-22d65ffee070) might be added in the future.
IBM MessageSight Configuration
------------------------------
The IBM MessageSight Virtual Appliance can be downloaded here:
[Download](http://www-933.ibm.com/support/fixcentral/swg/selectFixes?parent=ibm~Other+software&product=ibm/Other+software/MessageSight&function=fixId&fixids=1.0.0.1-IMA-DeveloperImage&includeSupersedes=0 "IBM MessageSight")
There is a nice blog post about it here:
[Blog](https://www.ibm.com/developerworks/community/blogs/c565c720-fe84-4f63-873f-607d87787327/entry/ibm_messagesight_for_developers_is_here?lang=en "Blog")
The virtual appliance must be installed into a virtual machine like
Oracle VirtualBox or VMWare Player. (Follow the instructions that come
with the download).
Next, copy your authorized keys (basically a file containing the public
rsa key of your own computer) onto the appliance to enable passwordless ssh.
For example,
Console> user sshkey add "scp://user@host:~/.ssh/authorized_keys"
More information can be found in the IBM MessageSight InfoCenter:
[InfoCenter](https://infocenters.hursley.ibm.com/ism/v1/help/index.jsp "InfoCenter")
Now, execute the script setup_IMA.sh to create the objects necessary
to configure the server for the unit test cases provided.
For example,
./setup_IMA.sh
You should now be able to view the objects on your server:
Console> imaserver show Endpoint Name=GoMqttEP1
Name = GoMqttEP1
Enabled = True
Port = 17001
Protocol = MQTT
Interface = all
SecurityProfile =
ConnectionPolicies = GoMqttCP1
MessagingPolicies = GoMqttMP1
MaxMessageSize = 1024KB
MessageHub = GoMqttTestHub
Description =
RSMB Configuration
------------------
Wait for SSL support?
Mosquitto Configuration
-----------------------
Launch mosquitto from the fvt directory, specifying mosquitto.cfg as the config file
``ex: /usr/bin/mosquitto -c ./mosquitto.cfg``
Note: Mosquitto requires TLS v1.1 or better, while Go 1.1.2 supports
only TLS v1.0. However, Go 1.2+ supports TLS v1.1 and TLS v1.2.
Other Notes
-----------
Go 1.1.2 does not support intermediate certificates; however, Go 1.2+ does.

View File

@@ -0,0 +1,17 @@
allow_anonymous true
allow_duplicate_messages false
connection_messages true
log_dest stdout
log_timestamp true
log_type all
persistence false
bind_address 127.0.0.1
listener 17001
listener 17002
listener 17003
listener 17004
#capath ../samples/samplecerts
#certfile ../samples/samplecerts/server-crt.pem
#keyfile ../samples/samplecerts/server-key.pem

View File

@@ -0,0 +1,8 @@
allow_anonymous false
bind_address 127.0.0.1
connection_messages true
log_level detail
listener 17001
#listener 17003
#listener 17004

View File

@@ -0,0 +1,111 @@
#!/bin/bash
#######################################################################
# This script is for configuring your IBM Messaging Appliance for use #
# as an mqtt test server for testing the go-mqtt open source client. #
# It creates the Policies and Endpoints necessary to test particular #
# features of the client, such as IPv6, SSL, and other things #
# #
# You do not need this script for any other purpose. #
#######################################################################
# Edit options to match your configuration
IMA_HOST=9.41.55.184
IMA_USER=admin
HOST=9.41.55.146
USER=root
CERTDIR=~/GO/src/github.com/shoenig/go-mqtt/samples/samplecerts
echo 'Configuring your IBM Messaging Appliance for testing go-mqtt'
echo 'IMA_HOST: ' $IMA_HOST
function ima {
reply=`ssh $IMA_USER@$IMA_HOST imaserver $@`
}
function imp {
reply=`ssh $IMA_USER@$IMA_HOST file get $@`
}
ima create MessageHub Name=GoMqttTestHub
# Config "1" is a basic, open endpoint, port 17001
ima create MessagingPolicy \
Name=GoMqttMP1 \
Protocol=MQTT \
ActionList=Publish,Subscribe \
MaxMessages=100000 \
DestinationType=Topic \
Destination=*
ima create ConnectionPolicy \
Name=GoMqttCP1 \
Protocol=MQTT
ima create Endpoint \
Name=GoMqttEP1 \
Protocol=MQTT \
MessageHub=GoMqttTestHub \
ConnectionPolicies=GoMqttCP1 \
MessagingPolicies=GoMqttMP1 \
Port=17001
# Config "2" is IPv6 only , port 17002
# Config "3" is for authorization failures, port 17003
ima create ConnectionPolicy \
Name=GoMqttCP2 \
Protocol=MQTT \
ClientID=GoMqttClient
ima create Endpoint \
Name=GoMqttEP3 \
Protocol=MQTT \
MessageHub=GoMqttTestHub \
ConnectionPolicies=GoMqttCP2 \
MessagingPolicies=GoMqttMP1 \
Port=17003
# Config "4" is secure connections, port 17004
imp scp://$USER@$HOST:${CERTDIR}/server-crt.pem .
imp scp://$USER@$HOST:${CERTDIR}/server-key.pem .
imp scp://$USER@$HOST:${CERTDIR}/rootCA-crt.pem .
imp scp://$USER@$HOST:${CERTDIR}/intermediateCA-crt.pem .
ima apply Certificate \
CertFileName=server-crt.pem \
"CertFilePassword=" \
KeyFileName=server-key.pem \
"KeyFilePassword="
ima create CertificateProfile \
Name=GoMqttCertProf \
Certificate=server-crt.pem \
Key=server-key.pem
ima create SecurityProfile \
Name=GoMqttSecProf \
MinimumProtocolMethod=SSLv3 \
UseClientCertificate=True \
UsePasswordAuthentication=False \
Ciphers=Fast \
CertificateProfile=GoMqttCertProf
ima apply Certificate \
TrustedCertificate=rootCA-crt.pem \
SecurityProfileName=GoMqttSecProf
ima apply Certificate \
TrustedCertificate=intermediateCA-crt.pem \
SecurityProfileName=GoMqttSecProf
ima create Endpoint \
Name=GoMqttEP4 \
Port=17004 \
MessageHub=GoMqttTestHub \
ConnectionPolicies=GoMqttCP1 \
MessagingPolicies=GoMqttMP1 \
SecurityProfile=GoMqttSecProf \
Protocol=MQTT

File diff suppressed because it is too large

View File

@@ -0,0 +1,496 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt
import (
"bytes"
"fmt"
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
"testing"
)
/*******************************
**** Some helper functions ****
*******************************/
func b2s(bs []byte) string {
s := ""
for _, b := range bs {
s += fmt.Sprintf("%x ", b)
}
return s
}
/**********************************************
**** A mock store implementation for test ****
**********************************************/
type TestStore struct {
mput []uint16
mget []uint16
mdel []uint16
}
func (ts *TestStore) Open() {
}
func (ts *TestStore) Close() {
}
func (ts *TestStore) Put(key string, m packets.ControlPacket) {
ts.mput = append(ts.mput, m.Details().MessageID)
}
func (ts *TestStore) Get(key string) packets.ControlPacket {
mid := mIDFromKey(key)
ts.mget = append(ts.mget, mid)
return nil
}
func (ts *TestStore) All() []string {
return nil
}
func (ts *TestStore) Del(key string) {
mid := mIDFromKey(key)
ts.mdel = append(ts.mdel, mid)
}
func (ts *TestStore) Reset() {
}
/*******************
**** FileStore ****
*******************/
func Test_NewFileStore(t *testing.T) {
storedir := "/tmp/TestStore/_new"
f := NewFileStore(storedir)
if f.opened {
t.Fatalf("filestore was opened without opening it")
}
if f.directory != storedir {
t.Fatalf("filestore directory is wrong")
}
// storedir might exist or might not, just like with a real client
// the point is, we don't care, we just want it to exist after it is
// opened
}
func Test_FileStore_Open(t *testing.T) {
storedir := "/tmp/TestStore/_open"
f := NewFileStore(storedir)
f.Open()
if !f.opened {
t.Fatalf("filestore was not set open")
}
if f.directory != storedir {
t.Fatalf("filestore directory is wrong")
}
if !exists(storedir) {
t.Fatalf("filestore directory does not exst after opening it")
}
}
func Test_FileStore_Close(t *testing.T) {
storedir := "/tmp/TestStore/_unopen"
f := NewFileStore(storedir)
f.Open()
if !f.opened {
t.Fatalf("filestore was not set open")
}
if f.directory != storedir {
t.Fatalf("filestore directory is wrong")
}
if !exists(storedir) {
t.Fatalf("filestore directory does not exst after opening it")
}
f.Close()
if f.opened {
t.Fatalf("filestore was still open after unopen")
}
if !exists(storedir) {
t.Fatalf("filestore was deleted after unopen")
}
}
func Test_FileStore_write(t *testing.T) {
storedir := "/tmp/TestStore/_write"
f := NewFileStore(storedir)
f.Open()
pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
pm.Qos = 1
pm.TopicName = "a/b/c"
pm.Payload = []byte{0xBE, 0xEF, 0xED}
pm.MessageID = 91
key := inboundKeyFromMID(pm.MessageID)
f.Put(key, pm)
if !exists(storedir + "/i.91.msg") {
t.Fatalf("message not in store")
}
}
func Test_FileStore_Get(t *testing.T) {
storedir := "/tmp/TestStore/_get"
f := NewFileStore(storedir)
f.Open()
pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
pm.Qos = 1
pm.TopicName = "/a/b/c"
pm.Payload = []byte{0xBE, 0xEF, 0xED}
pm.MessageID = 120
key := outboundKeyFromMID(pm.MessageID)
f.Put(key, pm)
if !exists(storedir + "/o.120.msg") {
t.Fatalf("message not in store")
}
exp := []byte{
/* msg type */
0x32, // qos 1
/* remlen */
0x0d,
/* topic, msg id in varheader */
0x00, // length of topic
0x06,
0x2F, // /
0x61, // a
0x2F, // /
0x62, // b
0x2F, // /
0x63, // c
/* msg id (is always 2 bytes) */
0x00,
0x78,
/*payload */
0xBE,
0xEF,
0xED,
}
m := f.Get(key)
if m == nil {
t.Fatalf("message not retreived from store")
}
var msg bytes.Buffer
m.Write(&msg)
if !bytes.Equal(exp, msg.Bytes()) {
t.Fatal("message from store not same as what went in", msg.Bytes())
}
}
func Test_FileStore_All(t *testing.T) {
storedir := "/tmp/TestStore/_all"
f := NewFileStore(storedir)
f.Open()
pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
pm.Qos = 2
pm.TopicName = "/t/r/v"
pm.Payload = []byte{0x01, 0x02}
pm.MessageID = 121
key := outboundKeyFromMID(pm.MessageID)
f.Put(key, pm)
keys := f.All()
if len(keys) != 1 {
t.Fatalf("FileStore.All does not have the messages")
}
if keys[0] != "o.121" {
t.Fatalf("FileStore.All has wrong key")
}
}
func Test_FileStore_Del(t *testing.T) {
storedir := "/tmp/TestStore/_del"
f := NewFileStore(storedir)
f.Open()
pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
pm.Qos = 1
pm.TopicName = "a/b/c"
pm.Payload = []byte{0xBE, 0xEF, 0xED}
pm.MessageID = 17
key := inboundKeyFromMID(pm.MessageID)
f.Put(key, pm)
if !exists(storedir + "/i.17.msg") {
t.Fatalf("message not in store")
}
f.Del(key)
if exists(storedir + "/i.17.msg") {
t.Fatalf("message still exists after deletion")
}
}
func Test_FileStore_Reset(t *testing.T) {
storedir := "/tmp/TestStore/_reset"
f := NewFileStore(storedir)
f.Open()
pm1 := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
pm1.Qos = 1
pm1.TopicName = "/q/w/e"
pm1.Payload = []byte{0xBB}
pm1.MessageID = 71
key1 := inboundKeyFromMID(pm1.MessageID)
f.Put(key1, pm1)
pm2 := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
pm2.Qos = 1
pm2.TopicName = "/q/w/e"
pm2.Payload = []byte{0xBB}
pm2.MessageID = 72
key2 := inboundKeyFromMID(pm2.MessageID)
f.Put(key2, pm2)
pm3 := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
pm3.Qos = 1
pm3.TopicName = "/q/w/e"
pm3.Payload = []byte{0xBB}
pm3.MessageID = 73
key3 := inboundKeyFromMID(pm3.MessageID)
f.Put(key3, pm3)
pm4 := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
pm4.Qos = 1
pm4.TopicName = "/q/w/e"
pm4.Payload = []byte{0xBB}
pm4.MessageID = 74
key4 := inboundKeyFromMID(pm4.MessageID)
f.Put(key4, pm4)
pm5 := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
pm5.Qos = 1
pm5.TopicName = "/q/w/e"
pm5.Payload = []byte{0xBB}
pm5.MessageID = 75
key5 := inboundKeyFromMID(pm5.MessageID)
f.Put(key5, pm5)
if !exists(storedir + "/i.71.msg") {
t.Fatalf("message not in store")
}
if !exists(storedir + "/i.72.msg") {
t.Fatalf("message not in store")
}
if !exists(storedir + "/i.73.msg") {
t.Fatalf("message not in store")
}
if !exists(storedir + "/i.74.msg") {
t.Fatalf("message not in store")
}
if !exists(storedir + "/i.75.msg") {
t.Fatalf("message not in store")
}
f.Reset()
if exists(storedir + "/i.71.msg") {
t.Fatalf("message still exists after reset")
}
if exists(storedir + "/i.72.msg") {
t.Fatalf("message still exists after reset")
}
if exists(storedir + "/i.73.msg") {
t.Fatalf("message still exists after reset")
}
if exists(storedir + "/i.74.msg") {
t.Fatalf("message still exists after reset")
}
if exists(storedir + "/i.75.msg") {
t.Fatalf("message still exists after reset")
}
}
/*******************
*** MemoryStore ***
*******************/
func Test_NewMemoryStore(t *testing.T) {
m := NewMemoryStore()
if m == nil {
t.Fatalf("MemoryStore could not be created")
}
}
func Test_MemoryStore_Open(t *testing.T) {
m := NewMemoryStore()
m.Open()
if !m.opened {
t.Fatalf("MemoryStore was not set open")
}
}
func Test_MemoryStore_Close(t *testing.T) {
m := NewMemoryStore()
m.Open()
if !m.opened {
t.Fatalf("MemoryStore was not set open")
}
m.Close()
if m.opened {
t.Fatalf("MemoryStore was still open after unopen")
}
}
func Test_MemoryStore_Reset(t *testing.T) {
m := NewMemoryStore()
m.Open()
pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
pm.Qos = 2
pm.TopicName = "/f/r/s"
pm.Payload = []byte{0xAB}
pm.MessageID = 81
key := outboundKeyFromMID(pm.MessageID)
m.Put(key, pm)
if len(m.messages) != 1 {
t.Fatalf("message not in memstore")
}
m.Reset()
if len(m.messages) != 0 {
t.Fatalf("reset did not clear memstore")
}
}
func Test_MemoryStore_write(t *testing.T) {
m := NewMemoryStore()
m.Open()
pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
pm.Qos = 1
pm.TopicName = "/a/b/c"
pm.Payload = []byte{0xBE, 0xEF, 0xED}
pm.MessageID = 91
key := inboundKeyFromMID(pm.MessageID)
m.Put(key, pm)
if len(m.messages) != 1 {
t.Fatalf("message not in store")
}
}
func Test_MemoryStore_Get(t *testing.T) {
m := NewMemoryStore()
m.Open()
pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
pm.Qos = 1
pm.TopicName = "/a/b/c"
pm.Payload = []byte{0xBE, 0xEF, 0xED}
pm.MessageID = 120
key := outboundKeyFromMID(pm.MessageID)
m.Put(key, pm)
if len(m.messages) != 1 {
t.Fatalf("message not in store")
}
exp := []byte{
/* msg type */
0x32, // qos 1
/* remlen */
0x0d,
/* topic, msg id in varheader */
0x00, // length of topic
0x06,
0x2F, // /
0x61, // a
0x2F, // /
0x62, // b
0x2F, // /
0x63, // c
/* msg id (is always 2 bytes) */
0x00,
0x78,
/*payload */
0xBE,
0xEF,
0xED,
}
msg := m.Get(key)
if msg == nil {
t.Fatalf("message not retreived from store")
}
var buf bytes.Buffer
msg.Write(&buf)
if !bytes.Equal(exp, buf.Bytes()) {
t.Fatalf("message from store not same as what went in")
}
}
func Test_MemoryStore_Del(t *testing.T) {
m := NewMemoryStore()
m.Open()
pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
pm.Qos = 1
pm.TopicName = "/a/b/c"
pm.Payload = []byte{0xBE, 0xEF, 0xED}
pm.MessageID = 17
key := outboundKeyFromMID(pm.MessageID)
m.Put(key, pm)
if len(m.messages) != 1 {
t.Fatalf("message not in store")
}
m.Del(key)
if len(m.messages) != 0 {
t.Fatalf("message still exists after deletion")
}
}

View File

@@ -0,0 +1,26 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt
// Use setup_IMA.sh for IBM's MessageSight
// Use fvt/rsmb.cfg for IBM's Really Small Message Broker
// Use fvt/mosquitto.cfg for the open source Mosquitto project
// Set these values to the URI of your MQTT Broker before running go test
const (
FVTAddr = "iot.eclipse.org"
FVTTCP = "tcp://" + FVTAddr + ":1883"
FVTSSL = "ssl://" + FVTAddr + ":8883"
)

View File

@@ -0,0 +1,119 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt
import (
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
"sync"
)
// MemoryStore implements the store interface to provide a "persistence"
// mechanism wholly stored in memory. This is only useful for
// as long as the client instance exists.
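// NewClient falls back to a MemoryStore automatically when no Store is
// set in ClientOptions (see NewClient in client.go).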
type MemoryStore struct {
sync.RWMutex
messages map[string]packets.ControlPacket
opened bool
}
// NewMemoryStore returns a pointer to a new instance of
// MemoryStore; the instance is not initialized and ready to
// use until Open() has been called on it.
func NewMemoryStore() *MemoryStore {
store := &MemoryStore{
messages: make(map[string]packets.ControlPacket),
opened: false,
}
return store
}
// Open initializes a MemoryStore instance.
func (store *MemoryStore) Open() {
store.Lock()
defer store.Unlock()
store.opened = true
DEBUG.Println(STR, "memorystore initialized")
}
// Put takes a key and a ControlPacket and stores the
// message.
func (store *MemoryStore) Put(key string, message packets.ControlPacket) {
store.Lock()
defer store.Unlock()
chkcond(store.opened)
store.messages[key] = message
}
// Get takes a key and looks in the store for a matching Message
// returning either the Message pointer or nil.
func (store *MemoryStore) Get(key string) packets.ControlPacket {
store.RLock()
defer store.RUnlock()
chkcond(store.opened)
mid := mIDFromKey(key)
m := store.messages[key]
if m == nil {
CRITICAL.Println(STR, "memorystore get: message", mid, "not found")
} else {
DEBUG.Println(STR, "memorystore get: message", mid, "found")
}
return m
}
// All returns a slice of strings containing all the keys currently
// in the MemoryStore.
func (store *MemoryStore) All() []string {
store.RLock()
defer store.RUnlock()
chkcond(store.opened)
keys := []string{}
for k := range store.messages {
keys = append(keys, k)
}
return keys
}
// Del takes a key, searches the MemoryStore and if the key is found
// deletes the Message pointer associated with it.
func (store *MemoryStore) Del(key string) {
store.Lock()
defer store.Unlock()
mid := mIDFromKey(key)
m := store.messages[key]
if m == nil {
WARN.Println(STR, "memorystore del: message", mid, "not found")
} else {
delete(store.messages, key)
DEBUG.Println(STR, "memorystore del: message", mid, "was deleted")
}
}
// Close will disallow modifications to the state of the store.
func (store *MemoryStore) Close() {
store.Lock()
defer store.Unlock()
chkcond(store.opened)
store.opened = false
DEBUG.Println(STR, "memorystore closed")
}
// Reset eliminates all persisted message data in the store.
func (store *MemoryStore) Reset() {
store.Lock()
defer store.Unlock()
chkcond(store.opened)
store.messages = make(map[string]packets.ControlPacket)
WARN.Println(STR, "memorystore wiped")
}

View File

@@ -0,0 +1,104 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt
import (
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
)
// Message defines the externals that a message implementation must support.
// These are received messages that are passed to the callbacks, not internal
// messages.
type Message interface {
Duplicate() bool
Qos() byte
Retained() bool
Topic() string
MessageID() uint16
Payload() []byte
}
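// A hypothetical callback illustrating the accessors (the
// func(*Client, Message) MessageHandler signature is assumed here):
//
//	handler := func(client *Client, m Message) {
//		fmt.Printf("topic=%s qos=%d payload=%s\n", m.Topic(), m.Qos(), m.Payload())
//	}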
type message struct {
duplicate bool
qos byte
retained bool
topic string
messageID uint16
payload []byte
}
func (m *message) Duplicate() bool {
return m.duplicate
}
func (m *message) Qos() byte {
return m.qos
}
func (m *message) Retained() bool {
return m.retained
}
func (m *message) Topic() string {
return m.topic
}
func (m *message) MessageID() uint16 {
return m.messageID
}
func (m *message) Payload() []byte {
return m.payload
}
func messageFromPublish(p *packets.PublishPacket) Message {
return &message{
duplicate: p.Dup,
qos: p.Qos,
retained: p.Retain,
topic: p.TopicName,
messageID: p.MessageID,
payload: p.Payload,
}
}
func newConnectMsgFromOptions(options *ClientOptions) *packets.ConnectPacket {
m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
m.CleanSession = options.CleanSession
m.WillFlag = options.WillEnabled
m.WillRetain = options.WillRetained
m.ClientIdentifier = options.ClientID
if options.WillEnabled {
m.WillQos = options.WillQos
m.WillTopic = options.WillTopic
m.WillMessage = options.WillPayload
}
if options.Username != "" {
m.UsernameFlag = true
m.Username = options.Username
//mustn't have a password without a username
if options.Password != "" {
m.PasswordFlag = true
m.Password = []byte(options.Password)
}
}
m.KeepaliveTimer = uint16(options.KeepAlive / time.Second)
return m
}
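
Received publishes surface to application code through this Message interface, typically inside a MessageHandler callback (the type is defined in the options file further down). A hedged sketch of a handler that only touches the accessors above:

package mqttexample

import (
	"fmt"

	mqtt "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)

// onMessage logs every field the Message interface exposes.
var onMessage mqtt.MessageHandler = func(client *mqtt.Client, msg mqtt.Message) {
	fmt.Printf("topic=%s qos=%d retained=%t dup=%t id=%d payload=%q\n",
		msg.Topic(), msg.Qos(), msg.Retained(), msg.Duplicate(),
		msg.MessageID(), msg.Payload())
}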

View File

@@ -0,0 +1,61 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt
import (
"sync"
)
// MId is a 16-bit message ID, as specified by the MQTT spec.
// In general, these values should not be depended upon by
// the client application.
type MId uint16
type messageIds struct {
sync.RWMutex
index map[uint16]Token
}
const (
midMin uint16 = 1
midMax uint16 = 65535
)
func (mids *messageIds) freeID(id uint16) {
mids.Lock()
defer mids.Unlock()
delete(mids.index, id)
}
func (mids *messageIds) getID(t Token) uint16 {
mids.Lock()
defer mids.Unlock()
for i := midMin; i < midMax; i++ {
if _, ok := mids.index[i]; !ok {
mids.index[i] = t
return i
}
}
return 0
}
func (mids *messageIds) getToken(id uint16) Token {
mids.RLock()
defer mids.RUnlock()
if token, ok := mids.index[id]; ok {
return token
}
return nil
}
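
getID scans linearly from midMin for the first unclaimed ID, so at most 65534 IDs can be in flight (midMax itself is never handed out), and 0 signals exhaustion. A standalone sketch of the same allocation behaviour, with hypothetical names rather than the package's unexported internals:

package main

import "fmt"

// allocID mirrors messageIds.getID: first free slot wins, 0 means exhausted.
func allocID(inUse map[uint16]bool) uint16 {
	for i := uint16(1); i < 65535; i++ {
		if !inUse[i] {
			inUse[i] = true
			return i
		}
	}
	return 0
}

func main() {
	inUse := map[uint16]bool{1: true, 2: true}
	fmt.Println(allocID(inUse)) // 3, the lowest ID not yet in use
}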

View File

@@ -0,0 +1,275 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt
import (
"crypto/tls"
"errors"
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
"golang.org/x/net/websocket"
"net"
"net/url"
"reflect"
"time"
)
func openConnection(uri *url.URL, tlsc *tls.Config, timeout time.Duration) (net.Conn, error) {
switch uri.Scheme {
case "ws":
conn, err := websocket.Dial(uri.String(), "mqtt", "ws://localhost")
if err != nil {
return nil, err
}
conn.PayloadType = websocket.BinaryFrame
return conn, err
case "wss":
config, err := websocket.NewConfig(uri.String(), "ws://localhost")
if err != nil {
return nil, err
}
config.Protocol = []string{"mqtt"}
config.TlsConfig = tlsc
conn, err := websocket.DialConfig(config)
if err != nil {
return nil, err
}
conn.PayloadType = websocket.BinaryFrame
return conn, err
case "tcp":
conn, err := net.DialTimeout("tcp", uri.Host, timeout)
if err != nil {
return nil, err
}
return conn, nil
case "ssl":
fallthrough
case "tls":
fallthrough
case "tcps":
conn, err := tls.DialWithDialer(&net.Dialer{Timeout: timeout}, "tcp", uri.Host, tlsc)
if err != nil {
return nil, err
}
return conn, nil
}
return nil, errors.New("Unknown protocol")
}
// actually read incoming packets off the wire
// send each ControlPacket into the ibound channel
func incoming(c *Client) {
defer c.workers.Done()
var err error
var cp packets.ControlPacket
DEBUG.Println(NET, "incoming started")
for {
if cp, err = packets.ReadPacket(c.conn); err != nil {
break
}
DEBUG.Println(NET, "Received Message")
c.ibound <- cp
}
// We received an error on read.
// If disconnect is in progress, swallow error and return
select {
case <-c.stop:
DEBUG.Println(NET, "incoming stopped")
return
// Not trying to disconnect, send the error to the errors channel
default:
ERROR.Println(NET, "incoming stopped with error")
c.errors <- err
return
}
}
// receive a packet on obound or oboundP, and then
// actually send the outgoing message to the wire
func outgoing(c *Client) {
defer c.workers.Done()
DEBUG.Println(NET, "outgoing started")
for {
DEBUG.Println(NET, "outgoing waiting for an outbound message")
select {
case <-c.stop:
DEBUG.Println(NET, "outgoing stopped")
return
case pub := <-c.obound:
msg := pub.p.(*packets.PublishPacket)
if msg.Qos != 0 && msg.MessageID == 0 {
msg.MessageID = c.getID(pub.t)
pub.t.(*PublishToken).messageID = msg.MessageID
}
//persist_obound(c.persist, msg)
if c.options.WriteTimeout > 0 {
c.conn.SetWriteDeadline(time.Now().Add(c.options.WriteTimeout))
}
if err := msg.Write(c.conn); err != nil {
ERROR.Println(NET, "outgoing stopped with error")
c.errors <- err
return
}
if c.options.WriteTimeout > 0 {
// If we successfully wrote, we don't want the timeout to happen during an idle period
// so we reset it to infinite.
c.conn.SetWriteDeadline(time.Time{})
}
if msg.Qos == 0 {
pub.t.flowComplete()
}
c.lastContact.update()
DEBUG.Println(NET, "obound wrote msg, id:", msg.MessageID)
case msg := <-c.oboundP:
switch msg.p.(type) {
case *packets.SubscribePacket:
msg.p.(*packets.SubscribePacket).MessageID = c.getID(msg.t)
case *packets.UnsubscribePacket:
msg.p.(*packets.UnsubscribePacket).MessageID = c.getID(msg.t)
}
DEBUG.Println(NET, "obound priority msg to write, type", reflect.TypeOf(msg.p))
if err := msg.p.Write(c.conn); err != nil {
ERROR.Println(NET, "outgoing stopped with error")
c.errors <- err
return
}
c.lastContact.update()
switch msg.p.(type) {
case *packets.DisconnectPacket:
msg.t.(*DisconnectToken).flowComplete()
DEBUG.Println(NET, "outbound wrote disconnect, stopping")
return
}
}
}
}
// receive ControlPackets on ibound
// store messages if necessary
// send replies on oboundP
// delete messages from store if necessary
func alllogic(c *Client) {
DEBUG.Println(NET, "logic started")
for {
DEBUG.Println(NET, "logic waiting for msg on ibound")
select {
case msg := <-c.ibound:
DEBUG.Println(NET, "logic got msg on ibound")
//persist_ibound(c.persist, msg)
switch msg.(type) {
case *packets.PingrespPacket:
DEBUG.Println(NET, "received pingresp")
c.pingOutstanding = false
case *packets.SubackPacket:
sa := msg.(*packets.SubackPacket)
DEBUG.Println(NET, "received suback, id:", sa.MessageID)
token := c.getToken(sa.MessageID).(*SubscribeToken)
DEBUG.Println(NET, "granted qoss", sa.GrantedQoss)
for i, qos := range sa.GrantedQoss {
token.subResult[token.subs[i]] = qos
}
token.flowComplete()
go c.freeID(sa.MessageID)
case *packets.UnsubackPacket:
ua := msg.(*packets.UnsubackPacket)
DEBUG.Println(NET, "received unsuback, id:", ua.MessageID)
token := c.getToken(ua.MessageID).(*UnsubscribeToken)
token.flowComplete()
go c.freeID(ua.MessageID)
case *packets.PublishPacket:
pp := msg.(*packets.PublishPacket)
DEBUG.Println(NET, "received publish, msgId:", pp.MessageID)
DEBUG.Println(NET, "putting msg on onPubChan")
switch pp.Qos {
case 2:
c.incomingPubChan <- pp
DEBUG.Println(NET, "done putting msg on incomingPubChan")
pr := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket)
pr.MessageID = pp.MessageID
DEBUG.Println(NET, "putting pubrec msg on obound")
c.oboundP <- &PacketAndToken{p: pr, t: nil}
DEBUG.Println(NET, "done putting pubrec msg on obound")
case 1:
c.incomingPubChan <- pp
DEBUG.Println(NET, "done putting msg on incomingPubChan")
pa := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
pa.MessageID = pp.MessageID
DEBUG.Println(NET, "putting puback msg on obound")
c.oboundP <- &PacketAndToken{p: pa, t: nil}
DEBUG.Println(NET, "done putting puback msg on obound")
case 0:
select {
case c.incomingPubChan <- pp:
DEBUG.Println(NET, "done putting msg on incomingPubChan")
case err, ok := <-c.errors:
DEBUG.Println(NET, "error while putting msg on pubChanZero")
// We are unblocked, but need to put the error back on so the outer
// select can handle it appropriately.
if ok {
go func(errVal error, errChan chan error) {
errChan <- errVal
}(err, c.errors)
}
}
}
case *packets.PubackPacket:
pa := msg.(*packets.PubackPacket)
DEBUG.Println(NET, "received puback, id:", pa.MessageID)
// c.receipts.get(msg.MsgId()) <- Receipt{}
// c.receipts.end(msg.MsgId())
c.getToken(pa.MessageID).flowComplete()
c.freeID(pa.MessageID)
case *packets.PubrecPacket:
prec := msg.(*packets.PubrecPacket)
DEBUG.Println(NET, "received pubrec, id:", prec.MessageID)
prel := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket)
prel.MessageID = prec.MessageID
select {
case c.oboundP <- &PacketAndToken{p: prel, t: nil}:
case <-time.After(time.Second):
}
case *packets.PubrelPacket:
pr := msg.(*packets.PubrelPacket)
DEBUG.Println(NET, "received pubrel, id:", pr.MessageID)
pc := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket)
pc.MessageID = pr.MessageID
select {
case c.oboundP <- &PacketAndToken{p: pc, t: nil}:
case <-time.After(time.Second):
}
case *packets.PubcompPacket:
pc := msg.(*packets.PubcompPacket)
DEBUG.Println(NET, "received pubcomp, id:", pc.MessageID)
c.getToken(pc.MessageID).flowComplete()
c.freeID(pc.MessageID)
}
case <-c.stop:
WARN.Println(NET, "logic stopped")
return
case err := <-c.errors:
ERROR.Println(NET, "logic got error")
c.internalConnLost(err)
return
}
c.lastContact.update()
}
}
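
The shutdown handling in incoming above relies on a non-blocking read of the stop channel: an orderly disconnect closes stop, so a read error raised during teardown is swallowed instead of being reported. A generic sketch of that pattern, with hypothetical names:

package main

import (
	"errors"
	"fmt"
)

// reportOrSwallow mirrors the tail of incoming(): if stop is already closed
// the error is part of an orderly shutdown and is dropped; otherwise it is
// forwarded on the errors channel.
func reportOrSwallow(err error, stop <-chan struct{}, errs chan<- error) {
	select {
	case <-stop:
		// shutting down: swallow the error
	default:
		errs <- err
	}
}

func main() {
	stop := make(chan struct{})
	errs := make(chan error, 1)
	reportOrSwallow(errors.New("read failed"), stop, errs)
	fmt.Println(<-errs) // forwarded, because stop was never closed
}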

View File

@@ -0,0 +1,17 @@
package mqtt
import (
"errors"
"fmt"
"strconv"
"testing"
)
// Test_errorFormatting exercises %#v formatting of a wrapped and a plain
// error; it logs the representations instead of failing unconditionally.
func Test_errorFormatting(t *testing.T) {
_, err := strconv.Atoi("")
e := fmt.Errorf("wrapped: %s", err)
t.Logf("%#v", e)
e1 := errors.New("placeholder error")
t.Logf("%#v", e1)
}

View File

@@ -0,0 +1,108 @@
<?xml version="1.0" encoding="ISO-8859-1" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1" />
<title>Eclipse Foundation Software User Agreement</title>
</head>
<body lang="EN-US">
<h2>Eclipse Foundation Software User Agreement</h2>
<p>February 1, 2011</p>
<h3>Usage Of Content</h3>
<p>THE ECLIPSE FOUNDATION MAKES AVAILABLE SOFTWARE, DOCUMENTATION, INFORMATION AND/OR OTHER MATERIALS FOR OPEN SOURCE PROJECTS
(COLLECTIVELY &quot;CONTENT&quot;). USE OF THE CONTENT IS GOVERNED BY THE TERMS AND CONDITIONS OF THIS AGREEMENT AND/OR THE TERMS AND
CONDITIONS OF LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW. BY USING THE CONTENT, YOU AGREE THAT YOUR USE
OF THE CONTENT IS GOVERNED BY THIS AGREEMENT AND/OR THE TERMS AND CONDITIONS OF ANY APPLICABLE LICENSE AGREEMENTS OR
NOTICES INDICATED OR REFERENCED BELOW. IF YOU DO NOT AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT AND THE TERMS AND
CONDITIONS OF ANY APPLICABLE LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW, THEN YOU MAY NOT USE THE CONTENT.</p>
<h3>Applicable Licenses</h3>
<p>Unless otherwise indicated, all Content made available by the Eclipse Foundation is provided to you under the terms and conditions of the Eclipse Public License Version 1.0
(&quot;EPL&quot;). A copy of the EPL is provided with this Content and is also available at <a href="http://www.eclipse.org/legal/epl-v10.html">http://www.eclipse.org/legal/epl-v10.html</a>.
For purposes of the EPL, &quot;Program&quot; will mean the Content.</p>
<p>Content includes, but is not limited to, source code, object code, documentation and other files maintained in the Eclipse Foundation source code
repository (&quot;Repository&quot;) in software modules (&quot;Modules&quot;) and made available as downloadable archives (&quot;Downloads&quot;).</p>
<ul>
<li>Content may be structured and packaged into modules to facilitate delivering, extending, and upgrading the Content. Typical modules may include plug-ins (&quot;Plug-ins&quot;), plug-in fragments (&quot;Fragments&quot;), and features (&quot;Features&quot;).</li>
<li>Each Plug-in or Fragment may be packaged as a sub-directory or JAR (Java&trade; ARchive) in a directory named &quot;plugins&quot;.</li>
<li>A Feature is a bundle of one or more Plug-ins and/or Fragments and associated material. Each Feature may be packaged as a sub-directory in a directory named &quot;features&quot;. Within a Feature, files named &quot;feature.xml&quot; may contain a list of the names and version numbers of the Plug-ins
and/or Fragments associated with that Feature.</li>
<li>Features may also include other Features (&quot;Included Features&quot;). Within a Feature, files named &quot;feature.xml&quot; may contain a list of the names and version numbers of Included Features.</li>
</ul>
<p>The terms and conditions governing Plug-ins and Fragments should be contained in files named &quot;about.html&quot; (&quot;Abouts&quot;). The terms and conditions governing Features and
Included Features should be contained in files named &quot;license.html&quot; (&quot;Feature Licenses&quot;). Abouts and Feature Licenses may be located in any directory of a Download or Module
including, but not limited to the following locations:</p>
<ul>
<li>The top-level (root) directory</li>
<li>Plug-in and Fragment directories</li>
<li>Inside Plug-ins and Fragments packaged as JARs</li>
<li>Sub-directories of the directory named &quot;src&quot; of certain Plug-ins</li>
<li>Feature directories</li>
</ul>
<p>Note: if a Feature made available by the Eclipse Foundation is installed using the Provisioning Technology (as defined below), you must agree to a license (&quot;Feature Update License&quot;) during the
installation process. If the Feature contains Included Features, the Feature Update License should either provide you with the terms and conditions governing the Included Features or
inform you where you can locate them. Feature Update Licenses may be found in the &quot;license&quot; property of files named &quot;feature.properties&quot; found within a Feature.
Such Abouts, Feature Licenses, and Feature Update Licenses contain the terms and conditions (or references to such terms and conditions) that govern your use of the associated Content in
that directory.</p>
<p>THE ABOUTS, FEATURE LICENSES, AND FEATURE UPDATE LICENSES MAY REFER TO THE EPL OR OTHER LICENSE AGREEMENTS, NOTICES OR TERMS AND CONDITIONS. SOME OF THESE
OTHER LICENSE AGREEMENTS MAY INCLUDE (BUT ARE NOT LIMITED TO):</p>
<ul>
<li>Eclipse Distribution License Version 1.0 (available at <a href="http://www.eclipse.org/licenses/edl-v10.html">http://www.eclipse.org/licenses/edl-v10.html</a>)</li>
<li>Common Public License Version 1.0 (available at <a href="http://www.eclipse.org/legal/cpl-v10.html">http://www.eclipse.org/legal/cpl-v10.html</a>)</li>
<li>Apache Software License 1.1 (available at <a href="http://www.apache.org/licenses/LICENSE">http://www.apache.org/licenses/LICENSE</a>)</li>
<li>Apache Software License 2.0 (available at <a href="http://www.apache.org/licenses/LICENSE-2.0">http://www.apache.org/licenses/LICENSE-2.0</a>)</li>
<li>Metro Link Public License 1.00 (available at <a href="http://www.opengroup.org/openmotif/supporters/metrolink/license.html">http://www.opengroup.org/openmotif/supporters/metrolink/license.html</a>)</li>
<li>Mozilla Public License Version 1.1 (available at <a href="http://www.mozilla.org/MPL/MPL-1.1.html">http://www.mozilla.org/MPL/MPL-1.1.html</a>)</li>
</ul>
<p>IT IS YOUR OBLIGATION TO READ AND ACCEPT ALL SUCH TERMS AND CONDITIONS PRIOR TO USE OF THE CONTENT. If no About, Feature License, or Feature Update License is provided, please
contact the Eclipse Foundation to determine what terms and conditions govern that particular Content.</p>
<h3>Use of Provisioning Technology</h3>
<p>The Eclipse Foundation makes available provisioning software, examples of which include, but are not limited to, p2 and the Eclipse
Update Manager (&quot;Provisioning Technology&quot;) for the purpose of allowing users to install software, documentation, information and/or
other materials (collectively &quot;Installable Software&quot;). This capability is provided with the intent of allowing such users to
install, extend and update Eclipse-based products. Information about packaging Installable Software is available at <a
href="http://eclipse.org/equinox/p2/repository_packaging.html">http://eclipse.org/equinox/p2/repository_packaging.html</a>
(&quot;Specification&quot;).</p>
<p>You may use Provisioning Technology to allow other parties to install Installable Software. You shall be responsible for enabling the
applicable license agreements relating to the Installable Software to be presented to, and accepted by, the users of the Provisioning Technology
in accordance with the Specification. By using Provisioning Technology in such a manner and making it available in accordance with the
Specification, you further acknowledge your agreement to, and the acquisition of all necessary rights to permit the following:</p>
<ol>
<li>A series of actions may occur (&quot;Provisioning Process&quot;) in which a user may execute the Provisioning Technology
on a machine (&quot;Target Machine&quot;) with the intent of installing, extending or updating the functionality of an Eclipse-based
product.</li>
<li>During the Provisioning Process, the Provisioning Technology may cause third party Installable Software or a portion thereof to be
accessed and copied to the Target Machine.</li>
<li>Pursuant to the Specification, you will provide to the user the terms and conditions that govern the use of the Installable
Software (&quot;Installable Software Agreement&quot;) and such Installable Software Agreement shall be accessed from the Target
Machine in accordance with the Specification. Such Installable Software Agreement must inform the user of the terms and conditions that govern
the Installable Software and must solicit acceptance by the end user in the manner prescribed in such Installable Software Agreement. Upon such
indication of agreement by the user, the provisioning Technology will complete installation of the Installable Software.</li>
</ol>
<h3>Cryptography</h3>
<p>Content may contain encryption software. The country in which you are currently may have restrictions on the import, possession, and use, and/or re-export to
another country, of encryption software. BEFORE using any encryption software, please check the country's laws, regulations and policies concerning the import,
possession, or use, and re-export of encryption software, to see if this is permitted.</p>
<p><small>Java and all Java-based trademarks are trademarks of Oracle Corporation in the United States, other countries, or both.</small></p>
</body>
</html>

View File

@@ -0,0 +1,27 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt
func chkerr(e error) {
if e != nil {
panic(e)
}
}
func chkcond(b bool) {
if !b {
panic("oops")
}
}

View File

@@ -0,0 +1,270 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt
import (
"crypto/tls"
"net/url"
"time"
)
// MessageHandler is a callback type which can be set to be
// executed upon the arrival of messages published to topics
// to which the client is subscribed.
type MessageHandler func(*Client, Message)
// ConnectionLostHandler is a callback type which can be set to be
// executed upon an unintended disconnection from the MQTT broker.
// Disconnects caused by calling Disconnect or ForceDisconnect will
// not cause an OnConnectionLost callback to execute.
type ConnectionLostHandler func(*Client, error)
// OnConnectHandler is a callback that is called when the client
// state changes from unconnected/disconnected to connected. Both
// at initial connection and on reconnection
type OnConnectHandler func(*Client)
// ClientOptions contains configurable options for an Client.
type ClientOptions struct {
Servers []*url.URL
ClientID string
Username string
Password string
CleanSession bool
Order bool
WillEnabled bool
WillTopic string
WillPayload []byte
WillQos byte
WillRetained bool
ProtocolVersion uint
protocolVersionExplicit bool
TLSConfig tls.Config
KeepAlive time.Duration
ConnectTimeout time.Duration
MaxReconnectInterval time.Duration
AutoReconnect bool
Store Store
DefaultPublishHander MessageHandler
OnConnect OnConnectHandler
OnConnectionLost ConnectionLostHandler
WriteTimeout time.Duration
}
// NewClientOptions will create a new ClientOptions type with some
// default values.
// Port: 1883
// CleanSession: True
// Order: True
// KeepAlive: 30 (seconds)
// ConnectTimeout: 30 (seconds)
// MaxReconnectInterval 10 (minutes)
// AutoReconnect: True
func NewClientOptions() *ClientOptions {
o := &ClientOptions{
Servers: nil,
ClientID: "",
Username: "",
Password: "",
CleanSession: true,
Order: true,
WillEnabled: false,
WillTopic: "",
WillPayload: nil,
WillQos: 0,
WillRetained: false,
ProtocolVersion: 0,
protocolVersionExplicit: false,
TLSConfig: tls.Config{},
KeepAlive: 30 * time.Second,
ConnectTimeout: 30 * time.Second,
MaxReconnectInterval: 10 * time.Minute,
AutoReconnect: true,
Store: nil,
OnConnect: nil,
OnConnectionLost: DefaultConnectionLostHandler,
WriteTimeout: 0, // 0 represents timeout disabled
}
return o
}
// AddBroker adds a broker URI to the list of brokers to be used. The format should be
// scheme://host:port
// Where "scheme" is one of "tcp", "ssl" (also accepted as "tls" or "tcps"), "ws", or "wss",
// "host" is the IP address (or hostname) and "port" is the port on which the broker is
// accepting connections.
func (o *ClientOptions) AddBroker(server string) *ClientOptions {
brokerURI, _ := url.Parse(server)
o.Servers = append(o.Servers, brokerURI)
return o
}
// SetClientID will set the client id to be used by this client when
// connecting to the MQTT broker. According to the MQTT v3.1 specification,
// a client id must be no longer than 23 characters.
func (o *ClientOptions) SetClientID(id string) *ClientOptions {
o.ClientID = id
return o
}
// SetUsername will set the username to be used by this client when connecting
// to the MQTT broker. Note: without the use of SSL/TLS, this information will
// be sent in plaintext across the wire.
func (o *ClientOptions) SetUsername(u string) *ClientOptions {
o.Username = u
return o
}
// SetPassword will set the password to be used by this client when connecting
// to the MQTT broker. Note: without the use of SSL/TLS, this information will
// be sent in plaintext across the wire.
func (o *ClientOptions) SetPassword(p string) *ClientOptions {
o.Password = p
return o
}
// SetCleanSession will set the "clean session" flag in the connect message
// when this client connects to an MQTT broker. By setting this flag, you are
// indicating that no messages saved by the broker for this client should be
// delivered. Any messages that this client intended to send before it
// previously disconnected, but never did, will also not be sent upon
// connecting to the broker.
func (o *ClientOptions) SetCleanSession(clean bool) *ClientOptions {
o.CleanSession = clean
return o
}
// SetOrderMatters will set the message routing to guarantee order within
// each QoS level. By default, this value is true. If set to false,
// this flag indicates that messages can be delivered asynchronously
// from the client to the application and possibly arrive out of order.
func (o *ClientOptions) SetOrderMatters(order bool) *ClientOptions {
o.Order = order
return o
}
// SetTLSConfig will set an SSL/TLS configuration to be used when connecting
// to an MQTT broker. Please read the official Go documentation for more
// information.
func (o *ClientOptions) SetTLSConfig(t *tls.Config) *ClientOptions {
o.TLSConfig = *t
return o
}
// SetStore will set the implementation of the Store interface
// used to provide message persistence in cases where QoS levels
// QoS_ONE or QoS_TWO are used. If no store is provided, then the
// client will use MemoryStore by default.
func (o *ClientOptions) SetStore(s Store) *ClientOptions {
o.Store = s
return o
}
// SetKeepAlive will set the amount of time that the client should wait
// before sending a PING request to the broker. This will allow the client
// to know that the connection to the server has not been lost.
func (o *ClientOptions) SetKeepAlive(k time.Duration) *ClientOptions {
o.KeepAlive = k
return o
}
// SetProtocolVersion sets the MQTT version to be used to connect to the
// broker. Legitimate values are currently 3 - MQTT 3.1 or 4 - MQTT 3.1.1
func (o *ClientOptions) SetProtocolVersion(pv uint) *ClientOptions {
if pv >= 3 && pv <= 4 {
o.ProtocolVersion = pv
o.protocolVersionExplicit = true
}
return o
}
// UnsetWill will cause any set will message to be disregarded.
func (o *ClientOptions) UnsetWill() *ClientOptions {
o.WillEnabled = false
return o
}
// SetWill accepts a string will message to be set. When the client connects,
// it will give this will message to the broker, which will then publish the
// provided payload (the will) to any clients that are subscribed to the provided
// topic.
func (o *ClientOptions) SetWill(topic string, payload string, qos byte, retained bool) *ClientOptions {
o.SetBinaryWill(topic, []byte(payload), qos, retained)
return o
}
// SetBinaryWill accepts a []byte will message to be set. When the client connects,
// it will give this will message to the broker, which will then publish the
// provided payload (the will) to any clients that are subscribed to the provided
// topic.
func (o *ClientOptions) SetBinaryWill(topic string, payload []byte, qos byte, retained bool) *ClientOptions {
o.WillEnabled = true
o.WillTopic = topic
o.WillPayload = payload
o.WillQos = qos
o.WillRetained = retained
return o
}
// SetDefaultPublishHandler sets the MessageHandler that will be called when a message
// is received that does not match any known subscriptions.
func (o *ClientOptions) SetDefaultPublishHandler(defaultHandler MessageHandler) *ClientOptions {
o.DefaultPublishHander = defaultHandler
return o
}
// SetOnConnectHandler sets the function to be called when the client is connected. Both
// at initial connection time and upon automatic reconnect.
func (o *ClientOptions) SetOnConnectHandler(onConn OnConnectHandler) *ClientOptions {
o.OnConnect = onConn
return o
}
// SetConnectionLostHandler will set the OnConnectionLost callback to be executed
// in the case where the client unexpectedly loses connection with the MQTT broker.
func (o *ClientOptions) SetConnectionLostHandler(onLost ConnectionLostHandler) *ClientOptions {
o.OnConnectionLost = onLost
return o
}
// SetWriteTimeout puts a limit on how long an MQTT publish should block until it unblocks with a
// timeout error. A duration of 0 never times out. Default 0 (timeout disabled).
func (o *ClientOptions) SetWriteTimeout(t time.Duration) *ClientOptions {
o.WriteTimeout = t
return o
}
// SetConnectTimeout limits how long the client will wait when trying to open a connection
// to an MQTT server before timing out and erroring the attempt. A duration of 0 never times out.
// Default 30 seconds. Currently only operational on TCP/TLS connections.
func (o *ClientOptions) SetConnectTimeout(t time.Duration) *ClientOptions {
o.ConnectTimeout = t
return o
}
// SetMaxReconnectInterval sets the maximum time that will be waited between reconnection attempts
// when the connection is lost
func (o *ClientOptions) SetMaxReconnectInterval(t time.Duration) *ClientOptions {
o.MaxReconnectInterval = t
return o
}
// SetAutoReconnect sets whether the automatic reconnection logic should be used
// when the connection is lost. Even if disabled, the ConnectionLostHandler is
// still called.
func (o *ClientOptions) SetAutoReconnect(a bool) *ClientOptions {
o.AutoReconnect = a
return o
}
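
Since every setter above returns the receiver, options compose fluently. An illustrative configuration; the broker URL, client ID, and will topic are placeholders:

package main

import (
	"time"

	mqtt "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)

func main() {
	opts := mqtt.NewClientOptions().
		AddBroker("tcp://localhost:1883").
		SetClientID("example-client").
		SetCleanSession(true).
		SetKeepAlive(30 * time.Second).
		SetWill("status/example-client", "offline", 1, true)
	_ = opts // hand these to the client constructor in real use
}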

View File

@@ -0,0 +1,57 @@
package packets
import (
"bytes"
"fmt"
"github.com/pborman/uuid"
"io"
)
//ConnackPacket is an internal representation of the fields of the
//Connack MQTT packet
type ConnackPacket struct {
FixedHeader
TopicNameCompression byte
ReturnCode byte
uuid uuid.UUID
}
func (ca *ConnackPacket) String() string {
str := fmt.Sprintf("%s\n", ca.FixedHeader)
str += fmt.Sprintf("returncode: %d", ca.ReturnCode)
return str
}
func (ca *ConnackPacket) Write(w io.Writer) error {
var body bytes.Buffer
var err error
body.WriteByte(ca.TopicNameCompression)
body.WriteByte(ca.ReturnCode)
ca.FixedHeader.RemainingLength = 2
packet := ca.FixedHeader.pack()
packet.Write(body.Bytes())
_, err = packet.WriteTo(w)
return err
}
//Unpack decodes the details of a ControlPacket after the fixed
//header has been read
func (ca *ConnackPacket) Unpack(b io.Reader) {
ca.TopicNameCompression = decodeByte(b)
ca.ReturnCode = decodeByte(b)
}
//Details returns a Details struct containing the Qos and
//MessageID of this ControlPacket
func (ca *ConnackPacket) Details() Details {
return Details{Qos: 0, MessageID: 0}
}
//UUID returns the unique ID assigned to the ControlPacket when
//it was originally received. Note: this is not related to the
//MessageID field for MQTT packets
func (ca *ConnackPacket) UUID() uuid.UUID {
return ca.uuid
}
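
Write always forces RemainingLength to 2 (flags byte plus return code) before packing the fixed header. A round-trip of the serialisation, as an illustrative sketch using only the exported API:

package main

import (
	"bytes"
	"fmt"

	"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
)

func main() {
	ca := packets.NewControlPacket(packets.Connack).(*packets.ConnackPacket)
	ca.ReturnCode = packets.Accepted

	var buf bytes.Buffer
	if err := ca.Write(&buf); err != nil {
		panic(err)
	}
	// fixed header (0x20, remaining length 2) + flags byte + return code
	fmt.Println(buf.Bytes()) // [32 2 0 0]
}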

View File

@@ -0,0 +1,128 @@
package packets
import (
"bytes"
"fmt"
"github.com/pborman/uuid"
"io"
)
//ConnectPacket is an internal representation of the fields of the
//Connect MQTT packet
type ConnectPacket struct {
FixedHeader
ProtocolName string
ProtocolVersion byte
CleanSession bool
WillFlag bool
WillQos byte
WillRetain bool
UsernameFlag bool
PasswordFlag bool
ReservedBit byte
KeepaliveTimer uint16
ClientIdentifier string
WillTopic string
WillMessage []byte
Username string
Password []byte
uuid uuid.UUID
}
func (c *ConnectPacket) String() string {
str := fmt.Sprintf("%s\n", c.FixedHeader)
str += fmt.Sprintf("protocolversion: %d protocolname: %s cleansession: %t willflag: %t WillQos: %d WillRetain: %t Usernameflag: %t Passwordflag: %t keepalivetimer: %d\nclientId: %s\nwilltopic: %s\nwillmessage: %s\nUsername: %s\nPassword: %s\n", c.ProtocolVersion, c.ProtocolName, c.CleanSession, c.WillFlag, c.WillQos, c.WillRetain, c.UsernameFlag, c.PasswordFlag, c.KeepaliveTimer, c.ClientIdentifier, c.WillTopic, c.WillMessage, c.Username, c.Password)
return str
}
func (c *ConnectPacket) Write(w io.Writer) error {
var body bytes.Buffer
var err error
body.Write(encodeString(c.ProtocolName))
body.WriteByte(c.ProtocolVersion)
body.WriteByte(boolToByte(c.CleanSession)<<1 | boolToByte(c.WillFlag)<<2 | c.WillQos<<3 | boolToByte(c.WillRetain)<<5 | boolToByte(c.PasswordFlag)<<6 | boolToByte(c.UsernameFlag)<<7)
body.Write(encodeUint16(c.KeepaliveTimer))
body.Write(encodeString(c.ClientIdentifier))
if c.WillFlag {
body.Write(encodeString(c.WillTopic))
body.Write(encodeBytes(c.WillMessage))
}
if c.UsernameFlag {
body.Write(encodeString(c.Username))
}
if c.PasswordFlag {
body.Write(encodeBytes(c.Password))
}
c.FixedHeader.RemainingLength = body.Len()
packet := c.FixedHeader.pack()
packet.Write(body.Bytes())
_, err = packet.WriteTo(w)
return err
}
//Unpack decodes the details of a ControlPacket after the fixed
//header has been read
func (c *ConnectPacket) Unpack(b io.Reader) {
c.ProtocolName = decodeString(b)
c.ProtocolVersion = decodeByte(b)
options := decodeByte(b)
c.ReservedBit = 1 & options
c.CleanSession = 1&(options>>1) > 0
c.WillFlag = 1&(options>>2) > 0
c.WillQos = 3 & (options >> 3)
c.WillRetain = 1&(options>>5) > 0
c.PasswordFlag = 1&(options>>6) > 0
c.UsernameFlag = 1&(options>>7) > 0
c.KeepaliveTimer = decodeUint16(b)
c.ClientIdentifier = decodeString(b)
if c.WillFlag {
c.WillTopic = decodeString(b)
c.WillMessage = decodeBytes(b)
}
if c.UsernameFlag {
c.Username = decodeString(b)
}
if c.PasswordFlag {
c.Password = decodeBytes(b)
}
}
//Validate performs validation of the fields of a Connect packet
func (c *ConnectPacket) Validate() byte {
if c.PasswordFlag && !c.UsernameFlag {
return ErrRefusedBadUsernameOrPassword
}
if c.ReservedBit != 0 {
//Bad reserved bit
return ErrProtocolViolation
}
if (c.ProtocolName == "MQIsdp" && c.ProtocolVersion != 3) || (c.ProtocolName == "MQTT" && c.ProtocolVersion != 4) {
//Mismatched or unsupported protocol version
return ErrRefusedBadProtocolVersion
}
if c.ProtocolName != "MQIsdp" && c.ProtocolName != "MQTT" {
//Bad protocol name
return ErrProtocolViolation
}
if len(c.ClientIdentifier) > 65535 || len(c.Username) > 65535 || len(c.Password) > 65535 {
//Bad size field
return ErrProtocolViolation
}
return Accepted
}
//Details returns a Details struct containing the Qos and
//MessageID of this ControlPacket
func (c *ConnectPacket) Details() Details {
return Details{Qos: 0, MessageID: 0}
}
//UUID returns the unique ID assigned to the ControlPacket when
//it was originally received. Note: this is not related to the
//MessageID field for MQTT packets
func (c *ConnectPacket) UUID() uuid.UUID {
return c.uuid
}
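
Validate returns one of the Connack return-code constants rather than a Go error, so Accepted (0x00) is the success value. A sketch:

package main

import (
	"fmt"

	"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
)

func main() {
	c := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
	c.ProtocolName = "MQTT"
	c.ProtocolVersion = 4
	c.ClientIdentifier = "example-client" // placeholder ID

	if rc := c.Validate(); rc != packets.Accepted {
		fmt.Println("rejected:", packets.ConnackReturnCodes[rc])
	} else {
		fmt.Println("accepted")
	}
}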

View File

@@ -0,0 +1,44 @@
package packets
import (
"fmt"
"github.com/pborman/uuid"
"io"
)
//DisconnectPacket is an internal representation of the fields of the
//Disconnect MQTT packet
type DisconnectPacket struct {
FixedHeader
uuid uuid.UUID
}
func (d *DisconnectPacket) String() string {
str := fmt.Sprintf("%s\n", d.FixedHeader)
return str
}
func (d *DisconnectPacket) Write(w io.Writer) error {
packet := d.FixedHeader.pack()
_, err := packet.WriteTo(w)
return err
}
//Unpack decodes the details of a ControlPacket after the fixed
//header has been read
func (d *DisconnectPacket) Unpack(b io.Reader) {
}
//Details returns a Details struct containing the Qos and
//MessageID of this ControlPacket
func (d *DisconnectPacket) Details() Details {
return Details{Qos: 0, MessageID: 0}
}
//UUID returns the unique ID assigned to the ControlPacket when
//it was originally received. Note: this is not related to the
//MessageID field for MQTT packets
func (d *DisconnectPacket) UUID() uuid.UUID {
return d.uuid
}

View File

@@ -0,0 +1,324 @@
package packets
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"github.com/pborman/uuid"
"io"
)
//ControlPacket defines the interface for structs intended to hold
//decoded MQTT packets, either from being read or before being
//written
type ControlPacket interface {
Write(io.Writer) error
Unpack(io.Reader)
String() string
Details() Details
UUID() uuid.UUID
}
//PacketNames maps the constants for each of the MQTT packet types
//to a string representation of their name.
var PacketNames = map[uint8]string{
1: "CONNECT",
2: "CONNACK",
3: "PUBLISH",
4: "PUBACK",
5: "PUBREC",
6: "PUBREL",
7: "PUBCOMP",
8: "SUBSCRIBE",
9: "SUBACK",
10: "UNSUBSCRIBE",
11: "UNSUBACK",
12: "PINGREQ",
13: "PINGRESP",
14: "DISCONNECT",
}
//Below are the constants assigned to each of the MQTT packet types
const (
Connect = 1
Connack = 2
Publish = 3
Puback = 4
Pubrec = 5
Pubrel = 6
Pubcomp = 7
Subscribe = 8
Suback = 9
Unsubscribe = 10
Unsuback = 11
Pingreq = 12
Pingresp = 13
Disconnect = 14
)
//Below are the const definitions for error codes returned by
//Connect()
const (
Accepted = 0x00
ErrRefusedBadProtocolVersion = 0x01
ErrRefusedIDRejected = 0x02
ErrRefusedServerUnavailable = 0x03
ErrRefusedBadUsernameOrPassword = 0x04
ErrRefusedNotAuthorised = 0x05
ErrNetworkError = 0xFE
ErrProtocolViolation = 0xFF
)
//ConnackReturnCodes is a map of the error codes constants for Connect()
//to a string representation of the error
var ConnackReturnCodes = map[uint8]string{
0: "Connection Accepted",
1: "Connection Refused: Bad Protocol Version",
2: "Connection Refused: Client Identifier Rejected",
3: "Connection Refused: Server Unavailable",
4: "Connection Refused: Username or Password in unknown format",
5: "Connection Refused: Not Authorised",
254: "Connection Error",
255: "Connection Refused: Protocol Violation",
}
//ConnErrors is a map of the error code constants for Connect()
//to a Go error
var ConnErrors = map[byte]error{
Accepted: nil,
ErrRefusedBadProtocolVersion: errors.New("Unacceptable protocol version"),
ErrRefusedIDRejected: errors.New("Identifier rejected"),
ErrRefusedServerUnavailable: errors.New("Server Unavailable"),
ErrRefusedBadUsernameOrPassword: errors.New("Bad user name or password"),
ErrRefusedNotAuthorised: errors.New("Not Authorized"),
ErrNetworkError: errors.New("Network Error"),
ErrProtocolViolation: errors.New("Protocol Violation"),
}
//ReadPacket takes an instance of an io.Reader (such as net.Conn) and attempts
//to read an MQTT packet from the stream. It returns a ControlPacket
//representing the decoded MQTT packet and an error. Exactly one of these
//returns will be nil; a nil ControlPacket indicates an error occurred.
func ReadPacket(r io.Reader) (cp ControlPacket, err error) {
var fh FixedHeader
b := make([]byte, 1)
_, err = io.ReadFull(r, b)
if err != nil {
return nil, err
}
fh.unpack(b[0], r)
cp = NewControlPacketWithHeader(fh)
if cp == nil {
return nil, errors.New("Bad data from client")
}
packetBytes := make([]byte, fh.RemainingLength)
_, err = io.ReadFull(r, packetBytes)
if err != nil {
return nil, err
}
cp.Unpack(bytes.NewBuffer(packetBytes))
return cp, nil
}
//NewControlPacket is used to create a new ControlPacket of the type specified
//by packetType; this is usually done by reference to the packet type constants
//defined in packets.go. The newly created ControlPacket is empty and a pointer
//is returned.
func NewControlPacket(packetType byte) (cp ControlPacket) {
switch packetType {
case Connect:
cp = &ConnectPacket{FixedHeader: FixedHeader{MessageType: Connect}, uuid: uuid.NewUUID()}
case Connack:
cp = &ConnackPacket{FixedHeader: FixedHeader{MessageType: Connack}, uuid: uuid.NewUUID()}
case Disconnect:
cp = &DisconnectPacket{FixedHeader: FixedHeader{MessageType: Disconnect}, uuid: uuid.NewUUID()}
case Publish:
cp = &PublishPacket{FixedHeader: FixedHeader{MessageType: Publish}, uuid: uuid.NewUUID()}
case Puback:
cp = &PubackPacket{FixedHeader: FixedHeader{MessageType: Puback}, uuid: uuid.NewUUID()}
case Pubrec:
cp = &PubrecPacket{FixedHeader: FixedHeader{MessageType: Pubrec}, uuid: uuid.NewUUID()}
case Pubrel:
cp = &PubrelPacket{FixedHeader: FixedHeader{MessageType: Pubrel, Qos: 1}, uuid: uuid.NewUUID()}
case Pubcomp:
cp = &PubcompPacket{FixedHeader: FixedHeader{MessageType: Pubcomp}, uuid: uuid.NewUUID()}
case Subscribe:
cp = &SubscribePacket{FixedHeader: FixedHeader{MessageType: Subscribe, Qos: 1}, uuid: uuid.NewUUID()}
case Suback:
cp = &SubackPacket{FixedHeader: FixedHeader{MessageType: Suback}, uuid: uuid.NewUUID()}
case Unsubscribe:
cp = &UnsubscribePacket{FixedHeader: FixedHeader{MessageType: Unsubscribe, Qos: 1}, uuid: uuid.NewUUID()}
case Unsuback:
cp = &UnsubackPacket{FixedHeader: FixedHeader{MessageType: Unsuback}, uuid: uuid.NewUUID()}
case Pingreq:
cp = &PingreqPacket{FixedHeader: FixedHeader{MessageType: Pingreq}, uuid: uuid.NewUUID()}
case Pingresp:
cp = &PingrespPacket{FixedHeader: FixedHeader{MessageType: Pingresp}, uuid: uuid.NewUUID()}
default:
return nil
}
return cp
}
//NewControlPacketWithHeader is used to create a new ControlPacket of the type
//specified within the FixedHeader that is passed to the function.
//The newly created ControlPacket is empty and a pointer is returned.
func NewControlPacketWithHeader(fh FixedHeader) (cp ControlPacket) {
switch fh.MessageType {
case Connect:
cp = &ConnectPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
case Connack:
cp = &ConnackPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
case Disconnect:
cp = &DisconnectPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
case Publish:
cp = &PublishPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
case Puback:
cp = &PubackPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
case Pubrec:
cp = &PubrecPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
case Pubrel:
cp = &PubrelPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
case Pubcomp:
cp = &PubcompPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
case Subscribe:
cp = &SubscribePacket{FixedHeader: fh, uuid: uuid.NewUUID()}
case Suback:
cp = &SubackPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
case Unsubscribe:
cp = &UnsubscribePacket{FixedHeader: fh, uuid: uuid.NewUUID()}
case Unsuback:
cp = &UnsubackPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
case Pingreq:
cp = &PingreqPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
case Pingresp:
cp = &PingrespPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
default:
return nil
}
return cp
}
//Details struct returned by the Details() function called on
//ControlPackets to present details of the Qos and MessageID
//of the ControlPacket
type Details struct {
Qos byte
MessageID uint16
}
//FixedHeader is a struct to hold the decoded information from
//the fixed header of an MQTT ControlPacket
type FixedHeader struct {
MessageType byte
Dup bool
Qos byte
Retain bool
RemainingLength int
}
func (fh FixedHeader) String() string {
return fmt.Sprintf("%s: dup: %t qos: %d retain: %t rLength: %d", PacketNames[fh.MessageType], fh.Dup, fh.Qos, fh.Retain, fh.RemainingLength)
}
func boolToByte(b bool) byte {
switch b {
case true:
return 1
default:
return 0
}
}
func (fh *FixedHeader) pack() bytes.Buffer {
var header bytes.Buffer
header.WriteByte(fh.MessageType<<4 | boolToByte(fh.Dup)<<3 | fh.Qos<<1 | boolToByte(fh.Retain))
header.Write(encodeLength(fh.RemainingLength))
return header
}
func (fh *FixedHeader) unpack(typeAndFlags byte, r io.Reader) {
fh.MessageType = typeAndFlags >> 4
fh.Dup = (typeAndFlags>>3)&0x01 > 0
fh.Qos = (typeAndFlags >> 1) & 0x03
fh.Retain = typeAndFlags&0x01 > 0
fh.RemainingLength = decodeLength(r)
}
func decodeByte(b io.Reader) byte {
num := make([]byte, 1)
b.Read(num)
return num[0]
}
func decodeUint16(b io.Reader) uint16 {
num := make([]byte, 2)
b.Read(num)
return binary.BigEndian.Uint16(num)
}
func encodeUint16(num uint16) []byte {
bytes := make([]byte, 2)
binary.BigEndian.PutUint16(bytes, num)
return bytes
}
func encodeString(field string) []byte {
fieldLength := make([]byte, 2)
binary.BigEndian.PutUint16(fieldLength, uint16(len(field)))
return append(fieldLength, []byte(field)...)
}
func decodeString(b io.Reader) string {
fieldLength := decodeUint16(b)
field := make([]byte, fieldLength)
b.Read(field)
return string(field)
}
func decodeBytes(b io.Reader) []byte {
fieldLength := decodeUint16(b)
field := make([]byte, fieldLength)
b.Read(field)
return field
}
func encodeBytes(field []byte) []byte {
fieldLength := make([]byte, 2)
binary.BigEndian.PutUint16(fieldLength, uint16(len(field)))
return append(fieldLength, field...)
}
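//encodeLength implements the MQTT "remaining length" scheme: seven bits of
//the value per byte, least-significant group first, with the high bit set
//while more bytes follow. For example, 321 (= 2*128 + 65) encodes as the
//two bytes 0xC1 0x02.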
func encodeLength(length int) []byte {
var encLength []byte
for {
digit := byte(length % 128)
length /= 128
if length > 0 {
digit |= 0x80
}
encLength = append(encLength, digit)
if length == 0 {
break
}
}
return encLength
}
func decodeLength(r io.Reader) int {
var rLength uint32
var multiplier uint32
b := make([]byte, 1)
for {
io.ReadFull(r, b)
digit := b[0]
rLength |= uint32(digit&127) << multiplier
if (digit & 128) == 0 {
break
}
multiplier += 7
}
return int(rLength)
}
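
ReadPacket is the inverse of the per-packet Write methods, so any packet can be round-tripped through an in-memory buffer. An illustrative sketch with a QoS 0 publish (topic and payload are placeholders; PublishPacket is defined further down):

package main

import (
	"bytes"
	"fmt"

	"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
)

func main() {
	pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
	pub.TopicName = "sensors/temp"
	pub.Payload = []byte("21.5")

	var buf bytes.Buffer
	if err := pub.Write(&buf); err != nil {
		panic(err)
	}

	cp, err := packets.ReadPacket(&buf)
	if err != nil {
		panic(err)
	}
	back := cp.(*packets.PublishPacket)
	fmt.Println(back.TopicName, string(back.Payload)) // sensors/temp 21.5
}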

View File

@@ -0,0 +1,159 @@
package packets
import (
"bytes"
"testing"
)
func TestPacketNames(t *testing.T) {
if PacketNames[1] != "CONNECT" {
t.Errorf("PacketNames[1] is %s, should be %s", PacketNames[1], "CONNECT")
}
if PacketNames[2] != "CONNACK" {
t.Errorf("PacketNames[2] is %s, should be %s", PacketNames[2], "CONNACK")
}
if PacketNames[3] != "PUBLISH" {
t.Errorf("PacketNames[3] is %s, should be %s", PacketNames[3], "PUBLISH")
}
if PacketNames[4] != "PUBACK" {
t.Errorf("PacketNames[4] is %s, should be %s", PacketNames[4], "PUBACK")
}
if PacketNames[5] != "PUBREC" {
t.Errorf("PacketNames[5] is %s, should be %s", PacketNames[5], "PUBREC")
}
if PacketNames[6] != "PUBREL" {
t.Errorf("PacketNames[6] is %s, should be %s", PacketNames[6], "PUBREL")
}
if PacketNames[7] != "PUBCOMP" {
t.Errorf("PacketNames[7] is %s, should be %s", PacketNames[7], "PUBCOMP")
}
if PacketNames[8] != "SUBSCRIBE" {
t.Errorf("PacketNames[8] is %s, should be %s", PacketNames[8], "SUBSCRIBE")
}
if PacketNames[9] != "SUBACK" {
t.Errorf("PacketNames[9] is %s, should be %s", PacketNames[9], "SUBACK")
}
if PacketNames[10] != "UNSUBSCRIBE" {
t.Errorf("PacketNames[10] is %s, should be %s", PacketNames[10], "UNSUBSCRIBE")
}
if PacketNames[11] != "UNSUBACK" {
t.Errorf("PacketNames[11] is %s, should be %s", PacketNames[11], "UNSUBACK")
}
if PacketNames[12] != "PINGREQ" {
t.Errorf("PacketNames[12] is %s, should be %s", PacketNames[12], "PINGREQ")
}
if PacketNames[13] != "PINGRESP" {
t.Errorf("PacketNames[13] is %s, should be %s", PacketNames[13], "PINGRESP")
}
if PacketNames[14] != "DISCONNECT" {
t.Errorf("PacketNames[14] is %s, should be %s", PacketNames[14], "DISCONNECT")
}
}
func TestPacketConsts(t *testing.T) {
if Connect != 1 {
t.Errorf("Const for Connect is %d, should be %d", Connect, 1)
}
if Connack != 2 {
t.Errorf("Const for Connack is %d, should be %d", Connack, 2)
}
if Publish != 3 {
t.Errorf("Const for Publish is %d, should be %d", Publish, 3)
}
if Puback != 4 {
t.Errorf("Const for Puback is %d, should be %d", Puback, 4)
}
if Pubrec != 5 {
t.Errorf("Const for Pubrec is %d, should be %d", Pubrec, 5)
}
if Pubrel != 6 {
t.Errorf("Const for Pubrel is %d, should be %d", Pubrel, 6)
}
if Pubcomp != 7 {
t.Errorf("Const for Pubcomp is %d, should be %d", Pubcomp, 7)
}
if Subscribe != 8 {
t.Errorf("Const for Subscribe is %d, should be %d", Subscribe, 8)
}
if Suback != 9 {
t.Errorf("Const for Suback is %d, should be %d", Suback, 9)
}
if Unsubscribe != 10 {
t.Errorf("Const for Unsubscribe is %d, should be %d", Unsubscribe, 10)
}
if Unsuback != 11 {
t.Errorf("Const for Unsuback is %d, should be %d", Unsuback, 11)
}
if Pingreq != 12 {
t.Errorf("Const for Pingreq is %d, should be %d", Pingreq, 12)
}
if Pingresp != 13 {
t.Errorf("Const for Pingresp is %d, should be %d", Pingresp, 13)
}
if Disconnect != 14 {
t.Errorf("Const for Disconnect is %d, should be %d", Disconnect, 14)
}
}
func TestConnackConsts(t *testing.T) {
if Accepted != 0x00 {
t.Errorf("Const for Accepted is %d, should be %d", Accepted, 0)
}
if ErrRefusedBadProtocolVersion != 0x01 {
t.Errorf("Const for RefusedBadProtocolVersion is %d, should be %d", ErrRefusedBadProtocolVersion, 1)
}
if ErrRefusedIDRejected != 0x02 {
t.Errorf("Const for RefusedIDRejected is %d, should be %d", ErrRefusedIDRejected, 2)
}
if ErrRefusedServerUnavailable != 0x03 {
t.Errorf("Const for RefusedServerUnavailable is %d, should be %d", ErrRefusedServerUnavailable, 3)
}
if ErrRefusedBadUsernameOrPassword != 0x04 {
t.Errorf("Const for RefusedBadUsernameOrPassword is %d, should be %d", ErrRefusedBadUsernameOrPassword, 4)
}
if ErrRefusedNotAuthorised != 0x05 {
t.Errorf("Const for RefusedNotAuthorised is %d, should be %d", ErrRefusedNotAuthorised, 5)
}
}
func TestConnectPacket(t *testing.T) {
connectPacketBytes := bytes.NewBuffer([]byte{16, 52, 0, 4, 77, 81, 84, 84, 4, 204, 0, 0, 0, 0, 0, 4, 116, 101, 115, 116, 0, 12, 84, 101, 115, 116, 32, 80, 97, 121, 108, 111, 97, 100, 0, 8, 116, 101, 115, 116, 117, 115, 101, 114, 0, 8, 116, 101, 115, 116, 112, 97, 115, 115})
packet, err := ReadPacket(connectPacketBytes)
if err != nil {
t.Fatalf("Error reading packet: %s", err.Error())
}
cp := packet.(*ConnectPacket)
if cp.ProtocolName != "MQTT" {
t.Errorf("Connect Packet ProtocolName is %s, should be %s", cp.ProtocolName, "MQTT")
}
if cp.ProtocolVersion != 4 {
t.Errorf("Connect Packet ProtocolVersion is %d, should be %d", cp.ProtocolVersion, 4)
}
if cp.UsernameFlag != true {
t.Errorf("Connect Packet UsernameFlag is %t, should be %t", cp.UsernameFlag, true)
}
if cp.Username != "testuser" {
t.Errorf("Connect Packet Username is %s, should be %s", cp.Username, "testuser")
}
if cp.PasswordFlag != true {
t.Errorf("Connect Packet PasswordFlag is %t, should be %t", cp.PasswordFlag, true)
}
if string(cp.Password) != "testpass" {
t.Errorf("Connect Packet Password is %s, should be %s", string(cp.Password), "testpass")
}
if cp.WillFlag != true {
t.Errorf("Connect Packet WillFlag is %t, should be %t", cp.WillFlag, true)
}
if cp.WillTopic != "test" {
t.Errorf("Connect Packet WillTopic is %s, should be %s", cp.WillTopic, "test")
}
if cp.WillQos != 1 {
t.Errorf("Connect Packet WillQos is %d, should be %d", cp.WillQos, 1)
}
if cp.WillRetain != false {
t.Errorf("Connect Packet WillRetain is %t, should be %t", cp.WillRetain, false)
}
if string(cp.WillMessage) != "Test Payload" {
t.Errorf("Connect Packet WillMessage is %s, should be %s", string(cp.WillMessage), "Test Payload")
}
}

View File

@@ -0,0 +1,44 @@
package packets
import (
"fmt"
"github.com/pborman/uuid"
"io"
)
//PingreqPacket is an internal representation of the fields of the
//Pingreq MQTT packet
type PingreqPacket struct {
FixedHeader
uuid uuid.UUID
}
func (pr *PingreqPacket) String() string {
str := fmt.Sprintf("%s", pr.FixedHeader)
return str
}
func (pr *PingreqPacket) Write(w io.Writer) error {
packet := pr.FixedHeader.pack()
_, err := packet.WriteTo(w)
return err
}
//Unpack decodes the details of a ControlPacket after the fixed
//header has been read
func (pr *PingreqPacket) Unpack(b io.Reader) {
}
//Details returns a Details struct containing the Qos and
//MessageID of this ControlPacket
func (pr *PingreqPacket) Details() Details {
return Details{Qos: 0, MessageID: 0}
}
//UUID returns the unique ID assigned to the ControlPacket when
//it was originally received. Note: this is not related to the
//MessageID field for MQTT packets
func (pr *PingreqPacket) UUID() uuid.UUID {
return pr.uuid
}

View File

@@ -0,0 +1,44 @@
package packets
import (
"fmt"
"github.com/pborman/uuid"
"io"
)
//PingrespPacket is an internal representation of the fields of the
//Pingresp MQTT packet
type PingrespPacket struct {
FixedHeader
uuid uuid.UUID
}
func (pr *PingrespPacket) String() string {
str := fmt.Sprintf("%s", pr.FixedHeader)
return str
}
func (pr *PingrespPacket) Write(w io.Writer) error {
packet := pr.FixedHeader.pack()
_, err := packet.WriteTo(w)
return err
}
//Unpack decodes the details of a ControlPacket after the fixed
//header has been read
func (pr *PingrespPacket) Unpack(b io.Reader) {
}
//Details returns a Details struct containing the Qos and
//MessageID of this ControlPacket
func (pr *PingrespPacket) Details() Details {
return Details{Qos: 0, MessageID: 0}
}
//UUID returns the unique ID assigned to the ControlPacket when
//it was originally received. Note: this is not related to the
//MessageID field for MQTT packets
func (pr *PingrespPacket) UUID() uuid.UUID {
return pr.uuid
}

View File

@@ -0,0 +1,50 @@
package packets
import (
"fmt"
"github.com/pborman/uuid"
"io"
)
//PubackPacket is an internal representation of the fields of the
//Puback MQTT packet
type PubackPacket struct {
FixedHeader
MessageID uint16
uuid uuid.UUID
}
func (pa *PubackPacket) String() string {
str := fmt.Sprintf("%s\n", pa.FixedHeader)
str += fmt.Sprintf("messageID: %d", pa.MessageID)
return str
}
func (pa *PubackPacket) Write(w io.Writer) error {
var err error
pa.FixedHeader.RemainingLength = 2
packet := pa.FixedHeader.pack()
packet.Write(encodeUint16(pa.MessageID))
_, err = packet.WriteTo(w)
return err
}
//Unpack decodes the details of a ControlPacket after the fixed
//header has been read
func (pa *PubackPacket) Unpack(b io.Reader) {
pa.MessageID = decodeUint16(b)
}
//Details returns a Details struct containing the Qos and
//MessageID of this ControlPacket
func (pa *PubackPacket) Details() Details {
return Details{Qos: pa.Qos, MessageID: pa.MessageID}
}
//UUID returns the unique ID assigned to the ControlPacket when
//it was originally received. Note: this is not related to the
//MessageID field for MQTT packets
func (pa *PubackPacket) UUID() uuid.UUID {
return pa.uuid
}

View File

@@ -0,0 +1,50 @@
package packets
import (
"fmt"
"github.com/pborman/uuid"
"io"
)
//PubcompPacket is an internal representation of the fields of the
//Pubcomp MQTT packet
type PubcompPacket struct {
FixedHeader
MessageID uint16
uuid uuid.UUID
}
func (pc *PubcompPacket) String() string {
str := fmt.Sprintf("%s\n", pc.FixedHeader)
str += fmt.Sprintf("MessageID: %d", pc.MessageID)
return str
}
func (pc *PubcompPacket) Write(w io.Writer) error {
var err error
pc.FixedHeader.RemainingLength = 2
packet := pc.FixedHeader.pack()
packet.Write(encodeUint16(pc.MessageID))
_, err = packet.WriteTo(w)
return err
}
//Unpack decodes the details of a ControlPacket after the fixed
//header has been read
func (pc *PubcompPacket) Unpack(b io.Reader) {
pc.MessageID = decodeUint16(b)
}
//Details returns a Details struct containing the Qos and
//MessageID of this ControlPacket
func (pc *PubcompPacket) Details() Details {
return Details{Qos: pc.Qos, MessageID: pc.MessageID}
}
//UUID returns the unique ID assigned to the ControlPacket when
//it was originally received. Note: this is not related to the
//MessageID field for MQTT packets
func (pc *PubcompPacket) UUID() uuid.UUID {
return pc.uuid
}

View File

@@ -0,0 +1,82 @@
package packets
import (
"bytes"
"fmt"
"github.com/pborman/uuid"
"io"
)
//PublishPacket is an internal representation of the fields of the
//Publish MQTT packet
type PublishPacket struct {
FixedHeader
TopicName string
MessageID uint16
Payload []byte
uuid uuid.UUID
}
func (p *PublishPacket) String() string {
str := fmt.Sprintf("%s\n", p.FixedHeader)
str += fmt.Sprintf("topicName: %s MessageID: %d\n", p.TopicName, p.MessageID)
str += fmt.Sprintf("payload: %s\n", string(p.Payload))
return str
}
func (p *PublishPacket) Write(w io.Writer) error {
var body bytes.Buffer
var err error
body.Write(encodeString(p.TopicName))
if p.Qos > 0 {
body.Write(encodeUint16(p.MessageID))
}
p.FixedHeader.RemainingLength = body.Len() + len(p.Payload)
packet := p.FixedHeader.pack()
packet.Write(body.Bytes())
packet.Write(p.Payload)
_, err = w.Write(packet.Bytes())
return err
}
//Unpack decodes the details of a ControlPacket after the fixed
//header has been read
func (p *PublishPacket) Unpack(b io.Reader) {
var payloadLength = p.FixedHeader.RemainingLength
p.TopicName = decodeString(b)
if p.Qos > 0 {
p.MessageID = decodeUint16(b)
payloadLength -= len(p.TopicName) + 4
} else {
payloadLength -= len(p.TopicName) + 2
}
p.Payload = make([]byte, payloadLength)
b.Read(p.Payload)
}
//Copy creates a new PublishPacket with the same topic and payload
//but an empty fixed header, useful for when you want to deliver
//a message with different properties such as Qos but the same
//content
func (p *PublishPacket) Copy() *PublishPacket {
newP := NewControlPacket(Publish).(*PublishPacket)
newP.TopicName = p.TopicName
newP.Payload = p.Payload
return newP
}
//Details returns a Details struct containing the Qos and
//MessageID of this ControlPacket
func (p *PublishPacket) Details() Details {
return Details{Qos: p.Qos, MessageID: p.MessageID}
}
//UUID returns the unique ID assigned to the ControlPacket when
//it was originally received. Note: this is not related to the
//MessageID field for MQTT packets
func (p *PublishPacket) UUID() uuid.UUID {
return p.uuid
}
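
Copy is useful for re-delivering the same content with different properties: the clone shares topic and payload but starts from a fresh fixed header. A sketch:

package main

import (
	"fmt"

	"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
)

func main() {
	orig := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
	orig.TopicName = "sensors/temp" // placeholder topic
	orig.Payload = []byte("21.5")
	orig.Qos = 2

	dup := orig.Copy() // same topic and payload, fresh fixed header
	dup.Qos = 0        // redeliver the same content at a different QoS
	fmt.Println(dup.TopicName, dup.Qos, orig.Qos) // sensors/temp 0 2
}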

View File

@@ -0,0 +1,50 @@
package packets
import (
"fmt"
"github.com/pborman/uuid"
"io"
)
//PubrecPacket is an internal representation of the fields of the
//Pubrec MQTT packet
type PubrecPacket struct {
FixedHeader
MessageID uint16
uuid uuid.UUID
}
func (pr *PubrecPacket) String() string {
str := fmt.Sprintf("%s\n", pr.FixedHeader)
str += fmt.Sprintf("MessageID: %d", pr.MessageID)
return str
}
func (pr *PubrecPacket) Write(w io.Writer) error {
var err error
pr.FixedHeader.RemainingLength = 2
packet := pr.FixedHeader.pack()
packet.Write(encodeUint16(pr.MessageID))
_, err = packet.WriteTo(w)
return err
}
//Unpack decodes the details of a ControlPacket after the fixed
//header has been read
func (pr *PubrecPacket) Unpack(b io.Reader) {
pr.MessageID = decodeUint16(b)
}
//Details returns a Details struct containing the Qos and
//MessageID of this ControlPacket
func (pr *PubrecPacket) Details() Details {
return Details{Qos: pr.Qos, MessageID: pr.MessageID}
}
//UUID returns the unique ID assigned to the ControlPacket when
//it was originally received. Note: this is not related to the
//MessageID field for MQTT packets
func (pr *PubrecPacket) UUID() uuid.UUID {
return pr.uuid
}

View File

@@ -0,0 +1,50 @@
package packets

import (
	"fmt"
	"github.com/pborman/uuid"
	"io"
)

//PubrelPacket is an internal representation of the fields of the
//Pubrel MQTT packet
type PubrelPacket struct {
	FixedHeader
	MessageID uint16
	uuid      uuid.UUID
}

func (pr *PubrelPacket) String() string {
	str := fmt.Sprintf("%s\n", pr.FixedHeader)
	str += fmt.Sprintf("MessageID: %d", pr.MessageID)
	return str
}

func (pr *PubrelPacket) Write(w io.Writer) error {
	var err error
	pr.FixedHeader.RemainingLength = 2
	packet := pr.FixedHeader.pack()
	packet.Write(encodeUint16(pr.MessageID))
	_, err = packet.WriteTo(w)
	return err
}

//Unpack decodes the details of a ControlPacket after the fixed
//header has been read
func (pr *PubrelPacket) Unpack(b io.Reader) {
	pr.MessageID = decodeUint16(b)
}

//Details returns a Details struct containing the Qos and
//MessageID of this ControlPacket
func (pr *PubrelPacket) Details() Details {
	return Details{Qos: pr.Qos, MessageID: pr.MessageID}
}

//UUID returns the unique ID assigned to the ControlPacket when
//it was originally received. Note: this is not related to the
//MessageID field for MQTT packets
func (pr *PubrelPacket) UUID() uuid.UUID {
	return pr.uuid
}

View File

@@ -0,0 +1,58 @@
package packets

import (
	"bytes"
	"fmt"
	"github.com/pborman/uuid"
	"io"
)

//SubackPacket is an internal representation of the fields of the
//Suback MQTT packet
type SubackPacket struct {
	FixedHeader
	MessageID   uint16
	GrantedQoss []byte
	uuid        uuid.UUID
}

func (sa *SubackPacket) String() string {
	str := fmt.Sprintf("%s\n", sa.FixedHeader)
	str += fmt.Sprintf("MessageID: %d", sa.MessageID)
	return str
}

func (sa *SubackPacket) Write(w io.Writer) error {
	var body bytes.Buffer
	var err error
	body.Write(encodeUint16(sa.MessageID))
	body.Write(sa.GrantedQoss)
	sa.FixedHeader.RemainingLength = body.Len()
	packet := sa.FixedHeader.pack()
	packet.Write(body.Bytes())
	_, err = packet.WriteTo(w)
	return err
}

//Unpack decodes the details of a ControlPacket after the fixed
//header has been read
func (sa *SubackPacket) Unpack(b io.Reader) {
	var qosBuffer bytes.Buffer
	sa.MessageID = decodeUint16(b)
	qosBuffer.ReadFrom(b)
	sa.GrantedQoss = qosBuffer.Bytes()
}

//Details returns a Details struct containing the Qos and
//MessageID of this ControlPacket
func (sa *SubackPacket) Details() Details {
	return Details{Qos: 0, MessageID: sa.MessageID}
}

//UUID returns the unique ID assigned to the ControlPacket when
//it was originally received. Note: this is not related to the
//MessageID field for MQTT packets
func (sa *SubackPacket) UUID() uuid.UUID {
	return sa.uuid
}
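
Each byte of GrantedQoss answers the corresponding topic filter of the SUBSCRIBE it acknowledges: the granted QoS (0, 1 or 2), or 0x80 to signal failure in MQTT 3.1.1. A small illustrative sketch of inspecting it (standalone, not part of the library):

package main

import "fmt"

func main() {
	// GrantedQoss as unpacked above, one byte per requested topic filter.
	granted := []byte{1, 0x80}
	for i, g := range granted {
		if g == 0x80 {
			fmt.Printf("subscription %d rejected\n", i)
		} else {
			fmt.Printf("subscription %d granted at QoS %d\n", i, g)
		}
	}
}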

View File

@@ -0,0 +1,68 @@
package packets

import (
	"bytes"
	"fmt"
	"github.com/pborman/uuid"
	"io"
)

//SubscribePacket is an internal representation of the fields of the
//Subscribe MQTT packet
type SubscribePacket struct {
	FixedHeader
	MessageID uint16
	Topics    []string
	Qoss      []byte
	uuid      uuid.UUID
}

func (s *SubscribePacket) String() string {
	str := fmt.Sprintf("%s\n", s.FixedHeader)
	str += fmt.Sprintf("MessageID: %d topics: %s", s.MessageID, s.Topics)
	return str
}

func (s *SubscribePacket) Write(w io.Writer) error {
	var body bytes.Buffer
	var err error
	body.Write(encodeUint16(s.MessageID))
	for i, topic := range s.Topics {
		body.Write(encodeString(topic))
		body.WriteByte(s.Qoss[i])
	}
	s.FixedHeader.RemainingLength = body.Len()
	packet := s.FixedHeader.pack()
	packet.Write(body.Bytes())
	_, err = packet.WriteTo(w)
	return err
}

//Unpack decodes the details of a ControlPacket after the fixed
//header has been read
func (s *SubscribePacket) Unpack(b io.Reader) {
	s.MessageID = decodeUint16(b)
	payloadLength := s.FixedHeader.RemainingLength - 2
	for payloadLength > 0 {
		topic := decodeString(b)
		s.Topics = append(s.Topics, topic)
		qos := decodeByte(b)
		s.Qoss = append(s.Qoss, qos)
		payloadLength -= 2 + len(topic) + 1 //2 bytes of string length, plus string, plus 1 byte for Qos
	}
}

//Details returns a Details struct containing the Qos and
//MessageID of this ControlPacket
func (s *SubscribePacket) Details() Details {
	return Details{Qos: 1, MessageID: s.MessageID}
}

//UUID returns the unique ID assigned to the ControlPacket when
//it was originally received. Note: this is not related to the
//MessageID field for MQTT packets
func (s *SubscribePacket) UUID() uuid.UUID {
	return s.uuid
}
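
The length bookkeeping in Unpack above (after the 2-byte MessageID: 2 bytes of string length, the topic bytes, and 1 QoS byte per filter) can be checked in isolation; the helper below is a hypothetical standalone sketch of the same arithmetic:

package main

import "fmt"

// subscribeRemainingLength mirrors the SUBSCRIBE body arithmetic above:
// 2 bytes of MessageID, then per topic filter 2 bytes of length prefix,
// the topic bytes themselves, and 1 QoS byte.
func subscribeRemainingLength(topics []string) int {
	n := 2 // MessageID
	for _, t := range topics {
		n += 2 + len(t) + 1
	}
	return n
}

func main() {
	// 2 + (2+3+1) + (2+9+1) = 20
	fmt.Println(subscribeRemainingLength([]string{"a/b", "sensors/#"}))
}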

View File

@@ -0,0 +1,50 @@
package packets

import (
	"fmt"
	"github.com/pborman/uuid"
	"io"
)

//UnsubackPacket is an internal representation of the fields of the
//Unsuback MQTT packet
type UnsubackPacket struct {
	FixedHeader
	MessageID uint16
	uuid      uuid.UUID
}

func (ua *UnsubackPacket) String() string {
	str := fmt.Sprintf("%s\n", ua.FixedHeader)
	str += fmt.Sprintf("MessageID: %d", ua.MessageID)
	return str
}

func (ua *UnsubackPacket) Write(w io.Writer) error {
	var err error
	ua.FixedHeader.RemainingLength = 2
	packet := ua.FixedHeader.pack()
	packet.Write(encodeUint16(ua.MessageID))
	_, err = packet.WriteTo(w)
	return err
}

//Unpack decodes the details of a ControlPacket after the fixed
//header has been read
func (ua *UnsubackPacket) Unpack(b io.Reader) {
	ua.MessageID = decodeUint16(b)
}

//Details returns a Details struct containing the Qos and
//MessageID of this ControlPacket
func (ua *UnsubackPacket) Details() Details {
	return Details{Qos: 0, MessageID: ua.MessageID}
}

//UUID returns the unique ID assigned to the ControlPacket when
//it was originally received. Note: this is not related to the
//MessageID field for MQTT packets
func (ua *UnsubackPacket) UUID() uuid.UUID {
	return ua.uuid
}

View File

@@ -0,0 +1,61 @@
package packets

import (
	"bytes"
	"fmt"
	"github.com/pborman/uuid"
	"io"
)

//UnsubscribePacket is an internal representation of the fields of the
//Unsubscribe MQTT packet
type UnsubscribePacket struct {
	FixedHeader
	MessageID uint16
	Topics    []string
	uuid      uuid.UUID
}

func (u *UnsubscribePacket) String() string {
	str := fmt.Sprintf("%s\n", u.FixedHeader)
	str += fmt.Sprintf("MessageID: %d", u.MessageID)
	return str
}

func (u *UnsubscribePacket) Write(w io.Writer) error {
	var body bytes.Buffer
	var err error
	body.Write(encodeUint16(u.MessageID))
	for _, topic := range u.Topics {
		body.Write(encodeString(topic))
	}
	u.FixedHeader.RemainingLength = body.Len()
	packet := u.FixedHeader.pack()
	packet.Write(body.Bytes())
	_, err = packet.WriteTo(w)
	return err
}

//Unpack decodes the details of a ControlPacket after the fixed
//header has been read
func (u *UnsubscribePacket) Unpack(b io.Reader) {
	u.MessageID = decodeUint16(b)
	var topic string
	for topic = decodeString(b); topic != ""; topic = decodeString(b) {
		u.Topics = append(u.Topics, topic)
	}
}

//Details returns a Details struct containing the Qos and
//MessageID of this ControlPacket
func (u *UnsubscribePacket) Details() Details {
	return Details{Qos: 1, MessageID: u.MessageID}
}

//UUID returns the unique ID assigned to the ControlPacket when
//it was originally received. Note: this is not related to the
//MessageID field for MQTT packets
func (u *UnsubscribePacket) UUID() uuid.UUID {
	return u.uuid
}

View File

@@ -0,0 +1,73 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt

import (
	"errors"
	"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
	"sync"
	"time"
)

type lastcontact struct {
	sync.Mutex
	lasttime time.Time
}

func (l *lastcontact) update() {
	l.Lock()
	defer l.Unlock()
	l.lasttime = time.Now()
}

func (l *lastcontact) get() time.Time {
	l.Lock()
	defer l.Unlock()
	return l.lasttime
}
func keepalive(c *Client) {
	DEBUG.Println(PNG, "keepalive starting")
	c.pingOutstanding = false
	for {
		select {
		case <-c.stop:
			DEBUG.Println(PNG, "keepalive stopped")
			c.workers.Done()
			return
		default:
			last := uint(time.Since(c.lastContact.get()).Seconds())
			//DEBUG.Printf("%s last contact: %d (timeout: %d)", PNG, last, uint(c.options.KeepAlive.Seconds()))
			if last > uint(c.options.KeepAlive.Seconds()) {
				if !c.pingOutstanding {
					DEBUG.Println(PNG, "keepalive sending ping")
					ping := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket)
					//We don't want to wait behind large messages being sent; the Write call
					//will block until it is able to send the packet.
					ping.Write(c.conn)
					c.pingOutstanding = true
				} else {
					CRITICAL.Println(PNG, "pingresp not received, disconnecting")
					c.workers.Done()
					c.internalConnLost(errors.New("pingresp not received, disconnecting"))
					return
				}
			}
			time.Sleep(1 * time.Second)
		}
	}
}
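
For reference, a minimal sketch of tuning the interval the loop above compares against; SetKeepAlive is assumed to be the usual ClientOptions setter in this library version, and since the loop only polls once per second, very small values are effectively coarsened:

package main

import (
	"time"

	MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)

func main() {
	opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883")
	// Send a PINGREQ after 30 seconds without any other traffic
	// (assumed setter; see options in this library version).
	opts.SetKeepAlive(30 * time.Second)
	c := MQTT.NewClient(opts)
	if token := c.Connect(); token.Wait() && token.Error() != nil {
		panic(token.Error())
	}
	time.Sleep(90 * time.Second) // stay idle long enough for keepalive pings to fire
	c.Disconnect(250)
}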

View File

@@ -0,0 +1,162 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt

import (
	"container/list"
	"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
	"strings"
	"sync"
)

// route is a type which associates MQTT Topic strings with a
// callback to be executed upon the arrival of a message associated
// with a subscription to that topic.
type route struct {
	topic    string
	callback MessageHandler
}

// match takes a slice of strings which represent the route being tested having been split on '/'
// separators, and a slice of strings representing the topic string in the published message, similarly
// split.
// The function determines if the topic string matches the route according to the MQTT topic rules
// and returns a boolean of the outcome
func match(route []string, topic []string) bool {
	if len(route) == 0 {
		if len(topic) == 0 {
			return true
		}
		return false
	}
	if len(topic) == 0 {
		if route[0] == "#" {
			return true
		}
		return false
	}
	if route[0] == "#" {
		return true
	}
	if (route[0] == "+") || (route[0] == topic[0]) {
		return match(route[1:], topic[1:])
	}
	return false
}
func routeIncludesTopic(route, topic string) bool {
	return match(strings.Split(route, "/"), strings.Split(topic, "/"))
}
// match takes the topic string of the published message and does a basic compare with the
// topic of the current Route; if they match it returns true.
func (r *route) match(topic string) bool {
	return r.topic == topic || routeIncludesTopic(r.topic, topic)
}
type router struct {
	sync.RWMutex
	routes         *list.List
	defaultHandler MessageHandler
	messages       chan *packets.PublishPacket
	stop           chan bool
}

// newRouter returns a new instance of a Router and channel which can be used to tell the Router
// to stop
func newRouter() (*router, chan bool) {
	router := &router{routes: list.New(), messages: make(chan *packets.PublishPacket), stop: make(chan bool)}
	stop := router.stop
	return router, stop
}
// addRoute takes a topic string and MessageHandler callback. It looks in the current list of
// routes to see if there is already a matching Route. If there is it replaces the current
// callback with the new one. If not it adds a new entry to the list of Routes.
func (r *router) addRoute(topic string, callback MessageHandler) {
	r.Lock()
	defer r.Unlock()
	for e := r.routes.Front(); e != nil; e = e.Next() {
		if e.Value.(*route).match(topic) {
			existing := e.Value.(*route)
			existing.callback = callback
			return
		}
	}
	r.routes.PushBack(&route{topic: topic, callback: callback})
}
// deleteRoute takes a route string and looks for a matching Route in the list of Routes. If
// found, it removes the Route from the list.
func (r *router) deleteRoute(topic string) {
	r.Lock()
	defer r.Unlock()
	for e := r.routes.Front(); e != nil; e = e.Next() {
		if e.Value.(*route).match(topic) {
			r.routes.Remove(e)
			return
		}
	}
}
// setDefaultHandler assigns a default callback that will be called if no matching Route
// is found for an incoming Publish.
func (r *router) setDefaultHandler(handler MessageHandler) {
	r.defaultHandler = handler
}
// matchAndDispatch takes a channel of Message pointers as input and starts a goroutine that
// takes messages off the channel, matches them against the internal route list and calls the
// associated callback (or the defaultHandler, if one exists and no other route matched). If
// anything is sent down the stop channel the function will end.
func (r *router) matchAndDispatch(messages <-chan *packets.PublishPacket, order bool, client *Client) {
	go func() {
		for {
			select {
			case message := <-messages:
				sent := false
				r.RLock()
				for e := r.routes.Front(); e != nil; e = e.Next() {
					if e.Value.(*route).match(message.TopicName) {
						if order {
							r.RUnlock()
							e.Value.(*route).callback(client, messageFromPublish(message))
							r.RLock()
						} else {
							go e.Value.(*route).callback(client, messageFromPublish(message))
						}
						sent = true
					}
				}
				r.RUnlock()
				if !sent && r.defaultHandler != nil {
					if order {
						r.RLock()
						r.defaultHandler(client, messageFromPublish(message))
						r.RUnlock()
					} else {
						go r.defaultHandler(client, messageFromPublish(message))
					}
				}
			case <-r.stop:
				return
			}
		}
	}()
}
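
Because match and routeIncludesTopic are unexported, here is a standalone copy of the recursion for experimenting with the wildcard rules described above ('+' consumes exactly one topic level, '#' consumes the whole remainder); a sketch for illustration, not part of the library:

package main

import (
	"fmt"
	"strings"
)

// match is a standalone copy of the route matcher above.
func match(route, topic []string) bool {
	if len(route) == 0 {
		return len(topic) == 0
	}
	if len(topic) == 0 {
		return route[0] == "#"
	}
	if route[0] == "#" {
		return true
	}
	if route[0] == "+" || route[0] == topic[0] {
		return match(route[1:], topic[1:])
	}
	return false
}

func main() {
	fmt.Println(match(strings.Split("sensors/+/temp", "/"), strings.Split("sensors/room1/temp", "/"))) // true: '+' spans one level
	fmt.Println(match(strings.Split("sensors/#", "/"), strings.Split("sensors/room1/temp", "/")))      // true: '#' spans the rest
	fmt.Println(match(strings.Split("sensors/+", "/"), strings.Split("sensors/room1/temp", "/")))      // false: '+' is a single level
}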

View File

@@ -0,0 +1,18 @@
package main

import (
	"time"
	MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)

func main() {
	opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883")
	c := MQTT.NewClient(opts)
	if token := c.Connect(); token.Wait() && token.Error() != nil {
		panic(token.Error())
	}
	for {
		time.Sleep(1 * time.Second)
	}
}

View File

@@ -0,0 +1,10 @@
#!/bin/sh
go clean
for file in *.go
do
	echo -n "Compiling $file ..."
	go build "$file"
	echo " done."
done

View File

@@ -0,0 +1,23 @@
package main

import (
	"fmt"
	"time"
	MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)

func main() {
	opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883")
	opts.SetCleanSession(true)
	c := MQTT.NewClient(opts)
	if token := c.Connect(); token.Wait() && token.Error() != nil {
		panic(token.Error())
	}
	fmt.Println("plz mosquitto goes down now")
	time.Sleep(5 * time.Second)
	c.Disconnect(200)
	time.Sleep(5 * time.Second)
}

View File

@@ -0,0 +1,96 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
// This demonstrates how to implement your own Store interface and provide
// it to the go-mqtt client.
package main

import (
	"fmt"
	"time"
	MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
	"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
)

// This NoOpStore type implements the go-mqtt/Store interface, which
// allows it to be used by the go-mqtt client library. However, it is
// highly recommended that you do not use this NoOpStore in production,
// because it will NOT provide any sort of guarantee of message delivery.
type NoOpStore struct {
	// Contains nothing
}

func (store *NoOpStore) Open() {
	// Do nothing
}

func (store *NoOpStore) Put(string, packets.ControlPacket) {
	// Do nothing
}

func (store *NoOpStore) Get(string) packets.ControlPacket {
	// Do nothing
	return nil
}

func (store *NoOpStore) Del(string) {
	// Do nothing
}

func (store *NoOpStore) All() []string {
	return nil
}

func (store *NoOpStore) Close() {
	// Do Nothing
}

func (store *NoOpStore) Reset() {
	// Do Nothing
}

func main() {
	myNoOpStore := &NoOpStore{}
	opts := MQTT.NewClientOptions()
	opts.AddBroker("tcp://iot.eclipse.org:1883")
	opts.SetClientID("custom-store")
	opts.SetStore(myNoOpStore)
	var callback MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) {
		fmt.Printf("TOPIC: %s\n", msg.Topic())
		fmt.Printf("MSG: %s\n", msg.Payload())
	}
	c := MQTT.NewClient(opts)
	if token := c.Connect(); token.Wait() && token.Error() != nil {
		panic(token.Error())
	}
	c.Subscribe("/go-mqtt/sample", 0, callback)
	for i := 0; i < 5; i++ {
		text := fmt.Sprintf("this is msg #%d!", i)
		token := c.Publish("/go-mqtt/sample", 0, false, text)
		token.Wait()
	}
	for i := 1; i < 5; i++ {
		time.Sleep(1 * time.Second)
	}
	c.Disconnect(250)
}

View File

@@ -0,0 +1,745 @@
# Config file for mosquitto
#
# See mosquitto.conf(5) for more information.
#
# Default values are shown, uncomment to change.
#
# Use the # character to indicate a comment, but only if it is the
# very first character on the line.
# =================================================================
# General configuration
# =================================================================
# Time in seconds to wait before resending an outgoing QoS=1 or
# QoS=2 message.
#retry_interval 20
# Time in seconds between updates of the $SYS tree.
# Set to 0 to disable the publishing of the $SYS tree.
#sys_interval 10
# Time in seconds between cleaning the internal message store of
# unreferenced messages. Lower values will result in lower memory
# usage but more processor time, higher values will have the
# opposite effect.
# Setting a value of 0 means the unreferenced messages will be
# disposed of as quickly as possible.
#store_clean_interval 10
# Write process id to a file. Default is a blank string which means
# a pid file shouldn't be written.
# This should be set to /var/run/mosquitto.pid if mosquitto is
# being run automatically on boot with an init script and
# start-stop-daemon or similar.
#pid_file
# When run as root, drop privileges to this user and its primary
# group.
# Leave blank to stay as root, but this is not recommended.
# If run as a non-root user, this setting has no effect.
# Note that on Windows this has no effect and so mosquitto should
# be started by the user you wish it to run as.
#user mosquitto
# The maximum number of QoS 1 and 2 messages currently inflight per
# client.
# This includes messages that are partway through handshakes and
# those that are being retried. Defaults to 20. Set to 0 for no
# maximum. Setting to 1 will guarantee in-order delivery of QoS 1
# and 2 messages.
#max_inflight_messages 20
# The maximum number of QoS 1 and 2 messages to hold in a queue
# above those that are currently in-flight. Defaults to 100. Set
# to 0 for no maximum (not recommended).
# See also queue_qos0_messages.
#max_queued_messages 100
# Set to true to queue messages with QoS 0 when a persistent client is
# disconnected. These messages are included in the limit imposed by
# max_queued_messages.
# Defaults to false.
# This is a non-standard option for the MQTT v3.1 spec but is allowed in
# v3.1.1.
#queue_qos0_messages false
# This option sets the maximum publish payload size that the broker will allow.
# Received messages that exceed this size will not be accepted by the broker.
# The default value is 0, which means that all valid MQTT messages are
# accepted. MQTT imposes a maximum payload size of 268435455 bytes.
#message_size_limit 0
# This option controls whether a client is allowed to connect with a zero
# length client id or not. This option only affects clients using MQTT v3.1.1
# and later. If set to false, clients connecting with a zero length client id
# are disconnected. If set to true, clients will be allocated a client id by
# the broker. This means it is only useful for clients with clean session set
# to true.
#allow_zero_length_clientid true
# If allow_zero_length_clientid is true, this option allows you to set a prefix
# to automatically generated client ids to aid visibility in logs.
#auto_id_prefix
# This option allows persistent clients (those with clean session set to false)
# to be removed if they do not reconnect within a certain time frame.
#
# This is a non-standard option in MQTT V3.1 but allowed in MQTT v3.1.1.
#
# Badly designed clients may set clean session to false whilst using a randomly
# generated client id. This leads to persistent clients that will never
# reconnect. This option allows these clients to be removed.
#
# The expiration period should be an integer followed by one of d w m y for
# day, week, month and year respectively. For example
#
# persistent_client_expiration 2m
# persistent_client_expiration 14d
# persistent_client_expiration 1y
#
# The default if not set is to never expire persistent clients.
#persistent_client_expiration
# If a client is subscribed to multiple subscriptions that overlap, e.g. foo/#
# and foo/+/baz, then MQTT expects that when the broker receives a message on
# a topic that matches both subscriptions, such as foo/bar/baz, then the client
# should only receive the message once.
# Mosquitto keeps track of which clients a message has been sent to in order to
# meet this requirement. The allow_duplicate_messages option allows this
# behaviour to be disabled, which may be useful if you have a large number of
# clients subscribed to the same set of topics and are very concerned about
# minimising memory usage.
# It can be safely set to true if you know in advance that your clients will
# never have overlapping subscriptions, otherwise your clients must be able to
# correctly deal with duplicate messages even when they have QoS=2.
#allow_duplicate_messages false
# The MQTT specification requires that the QoS of a message delivered to a
# subscriber is never upgraded to match the QoS of the subscription. Enabling
# this option changes this behaviour. If upgrade_outgoing_qos is set true,
# messages sent to a subscriber will always match the QoS of its subscription.
# This is a non-standard option explicitly disallowed by the spec.
#upgrade_outgoing_qos false
# =================================================================
# Default listener
# =================================================================
# IP address/hostname to bind the default listener to. If not
# given, the default listener will not be bound to a specific
# address and so will be accessible to all network interfaces.
# bind_address ip-address/host name
#bind_address
# Port to use for the default listener.
#port 1883
# The maximum number of client connections to allow. This is
# a per listener setting.
# Default is -1, which means unlimited connections.
# Note that other process limits mean that unlimited connections
# are not really possible. Typically the default maximum number of
# connections possible is around 1024.
#max_connections -1
# -----------------------------------------------------------------
# Certificate based SSL/TLS support
# -----------------------------------------------------------------
# The following options can be used to enable SSL/TLS support for
# this listener. Note that the recommended port for MQTT over TLS
# is 8883, but this must be set manually.
#
# See also the mosquitto-tls man page.
# At least one of cafile or capath must be defined. They both
# define methods of accessing the PEM encoded Certificate
# Authority certificates that have signed your server certificate
# and that you wish to trust.
# cafile defines the path to a file containing the CA certificates.
# capath defines a directory that will be searched for files
# containing the CA certificates. For capath to work correctly, the
# certificate files must have ".crt" as the file ending and you must run
# "c_rehash <path to capath>" each time you add/remove a certificate.
#cafile
#capath
# Path to the PEM encoded server certificate.
#certfile
# Path to the PEM encoded keyfile.
#keyfile
# This option defines the version of the TLS protocol to use for this listener.
# The default value will always be the highest version that is available for
# the version of openssl that the broker was compiled against. For openssl >=
# 1.0.1 the valid values are tlsv1.2 tlsv1.1 and tlsv1. For openssl < 1.0.1 the
# valid values are tlsv1.
#tls_version
# By default a TLS enabled listener will operate in a similar fashion to a
# https enabled web server, in that the server has a certificate signed by a CA
# and the client will verify that it is a trusted certificate. The overall aim
# is encryption of the network traffic. By setting require_certificate to true,
# the client must provide a valid certificate in order for the network
# connection to proceed. This allows access to the broker to be controlled
# outside of the mechanisms provided by MQTT.
#require_certificate false
# If require_certificate is true, you may set use_identity_as_username to true
# to use the CN value from the client certificate as a username. If this is
# true, the password_file option will not be used for this listener.
#use_identity_as_username false
# If you have require_certificate set to true, you can create a certificate
# revocation list file to revoke access to particular client certificates. If
# you have done this, use crlfile to point to the PEM encoded revocation file.
#crlfile
# If you wish to control which encryption ciphers are used, use the ciphers
# option. The list of available ciphers can be obtained using the "openssl
# ciphers" command and should be provided in the same format as the output of
# that command.
# If unset defaults to DEFAULT:!aNULL:!eNULL:!LOW:!EXPORT:!SSLv2:@STRENGTH
#ciphers DEFAULT:!aNULL:!eNULL:!LOW:!EXPORT:!SSLv2:@STRENGTH
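# As a purely illustrative example (hypothetical paths), a TLS-enabled
# default listener could combine the options above as:
#
# port 8883
# cafile /etc/mosquitto/certs/ca.crt
# certfile /etc/mosquitto/certs/server.crt
# keyfile /etc/mosquitto/certs/server.key
# require_certificate false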
# -----------------------------------------------------------------
# Pre-shared-key based SSL/TLS support
# -----------------------------------------------------------------
# The following options can be used to enable PSK based SSL/TLS support for
# this listener. Note that the recommended port for MQTT over TLS is 8883, but
# this must be set manually.
#
# See also the mosquitto-tls man page and the "Certificate based SSL/TLS
# support" section. Only one of certificate or PSK encryption support can be
# enabled for any listener.
# The psk_hint option enables pre-shared-key support for this listener and also
# acts as an identifier for this listener. The hint is sent to clients and may
# be used locally to aid authentication. The hint is a free form string that
# doesn't have much meaning in itself, so feel free to be creative.
# If this option is provided, see psk_file to define the pre-shared keys to be
# used or create a security plugin to handle them.
#psk_hint
# Set use_identity_as_username to have the psk identity sent by the client used
# as its username. Authentication will be carried out using the PSK rather than
# the MQTT username/password and so password_file will not be used for this
# listener.
#use_identity_as_username false
# When using PSK, the encryption ciphers used will be chosen from the list of
# available PSK ciphers. If you want to control which ciphers are available,
# use the "ciphers" option. The list of available ciphers can be optained
# using the "openssl ciphers" command and should be provided in the same format
# as the output of that command.
#ciphers
# =================================================================
# Extra listeners
# =================================================================
# Listen on a port/ip address combination. By using this variable
# multiple times, mosquitto can listen on more than one port. If
# this variable is used and neither bind_address nor port given,
# then the default listener will not be started.
# The port number to listen on must be given. Optionally, an ip
# address or host name may be supplied as a second argument. In
# this case, mosquitto will attempt to bind the listener to that
# address and so restrict access to the associated network and
# interface. By default, mosquitto will listen on all interfaces.
# listener port-number [ip address/host name]
#listener
# The maximum number of client connections to allow. This is
# a per listener setting.
# Default is -1, which means unlimited connections.
# Note that other process limits mean that unlimited connections
# are not really possible. Typically the default maximum number of
# connections possible is around 1024.
#max_connections -1
# The listener can be restricted to operating within a topic hierarchy using
# the mount_point option. This is achieved by prefixing the mount_point string
# to all topics for any clients connected to this listener. This prefixing only
# happens internally to the broker; the client will not see the prefix.
#mount_point
# -----------------------------------------------------------------
# Certificate based SSL/TLS support
# -----------------------------------------------------------------
# The following options can be used to enable certificate based SSL/TLS support
# for this listener. Note that the recommended port for MQTT over TLS is 8883,
# but this must be set manually.
#
# See also the mosquitto-tls man page and the "Pre-shared-key based SSL/TLS
# support" section. Only one of certificate or PSK encryption support can be
# enabled for any listener.
# At least one of cafile or capath must be defined to enable certificate based
# TLS encryption. They both define methods of accessing the PEM encoded
# Certificate Authority certificates that have signed your server certificate
# and that you wish to trust.
# cafile defines the path to a file containing the CA certificates.
# capath defines a directory that will be searched for files
# containing the CA certificates. For capath to work correctly, the
# certificate files must have ".crt" as the file ending and you must run
# "c_rehash <path to capath>" each time you add/remove a certificate.
#cafile
#capath
# Path to the PEM encoded server certificate.
#certfile
# Path to the PEM encoded keyfile.
#keyfile
# By default a TLS enabled listener will operate in a similar fashion to a
# https enabled web server, in that the server has a certificate signed by a CA
# and the client will verify that it is a trusted certificate. The overall aim
# is encryption of the network traffic. By setting require_certificate to true,
# the client must provide a valid certificate in order for the network
# connection to proceed. This allows access to the broker to be controlled
# outside of the mechanisms provided by MQTT.
#require_certificate false
# If require_certificate is true, you may set use_identity_as_username to true
# to use the CN value from the client certificate as a username. If this is
# true, the password_file option will not be used for this listener.
#use_identity_as_username false
# If you have require_certificate set to true, you can create a certificate
# revocation list file to revoke access to particular client certificates. If
# you have done this, use crlfile to point to the PEM encoded revocation file.
#crlfile
# If you wish to control which encryption ciphers are used, use the ciphers
# option. The list of available ciphers can be obtained using the "openssl
# ciphers" command and should be provided in the same format as the output of
# that command.
#ciphers
# -----------------------------------------------------------------
# Pre-shared-key based SSL/TLS support
# -----------------------------------------------------------------
# The following options can be used to enable PSK based SSL/TLS support for
# this listener. Note that the recommended port for MQTT over TLS is 8883, but
# this must be set manually.
#
# See also the mosquitto-tls man page and the "Certificate based SSL/TLS
# support" section. Only one of certificate or PSK encryption support can be
# enabled for any listener.
# The psk_hint option enables pre-shared-key support for this listener and also
# acts as an identifier for this listener. The hint is sent to clients and may
# be used locally to aid authentication. The hint is a free form string that
# doesn't have much meaning in itself, so feel free to be creative.
# If this option is provided, see psk_file to define the pre-shared keys to be
# used or create a security plugin to handle them.
#psk_hint
# Set use_identity_as_username to have the psk identity sent by the client used
# as its username. Authentication will be carried out using the PSK rather than
# the MQTT username/password and so password_file will not be used for this
# listener.
#use_identity_as_username false
# When using PSK, the encryption ciphers used will be chosen from the list of
# available PSK ciphers. If you want to control which ciphers are available,
# use the "ciphers" option. The list of available ciphers can be optained
# using the "openssl ciphers" command and should be provided in the same format
# as the output of that command.
#ciphers
# =================================================================
# Persistence
# =================================================================
# If persistence is enabled, save the in-memory database to disk
# every autosave_interval seconds. If set to 0, the persistence
# database will only be written when mosquitto exits. See also
# autosave_on_changes.
# Note that writing of the persistence database can be forced by
# sending mosquitto a SIGUSR1 signal.
#autosave_interval 1800
# If true, mosquitto will count the number of subscription changes, retained
# messages received and queued messages and if the total exceeds
# autosave_interval then the in-memory database will be saved to disk.
# If false, mosquitto will save the in-memory database to disk by treating
# autosave_interval as a time in seconds.
#autosave_on_changes false
# Save persistent message data to disk (true/false).
# This saves information about all messages, including
# subscriptions, currently in-flight messages and retained
# messages.
# retained_persistence is a synonym for this option.
#persistence false
# The filename to use for the persistent database, not including
# the path.
#persistence_file mosquitto.db
# Location for persistent database. Must include trailing /
# Default is an empty string (current directory).
# Set to e.g. /var/lib/mosquitto/ if running as a proper service on Linux or
# similar.
#persistence_location
# =================================================================
# Logging
# =================================================================
# Places to log to. Use multiple log_dest lines for multiple
# logging destinations.
# Possible destinations are: stdout stderr syslog topic file
#
# stdout and stderr log to the console on the named output.
#
# syslog uses the userspace syslog facility which usually ends up
# in /var/log/messages or similar.
#
# topic logs to the broker topic '$SYS/broker/log/<severity>',
# where severity is one of D, E, W, N, I, M which are debug, error,
# warning, notice, information and message. Message type severity is used by
# the subscribe/unsubscribe log_types and publishes log messages to
# $SYS/broker/log/M/subscribe or $SYS/broker/log/M/unsubscribe.
#
# The file destination requires an additional parameter which is the file to be
# logged to, e.g. "log_dest file /var/log/mosquitto.log". The file will be
# closed and reopened when the broker receives a HUP signal. Only a single file
# destination may be configured.
#
# Note that if the broker is running as a Windows service it will default to
# "log_dest none" and neither stdout nor stderr logging is available.
# Use "log_dest none" if you wish to disable logging.
log_dest stdout
# Types of messages to log. Use multiple log_type lines for logging
# multiple types of messages.
# Possible types are: debug, error, warning, notice, information,
# none, subscribe, unsubscribe, all.
# Note that debug type messages are for decoding the incoming/outgoing
# network packets. They are not logged in "topics".
#log_type error
#log_type warning
#log_type notice
log_type information
# If set to true, client connection and disconnection messages will be included
# in the log.
#connection_messages true
# If set to true, add a timestamp value to each log message.
#log_timestamp true
# =================================================================
# Security
# =================================================================
# If set, only clients that have a matching prefix on their
# clientid will be allowed to connect to the broker. By default,
# all clients may connect.
# For example, setting "secure-" here would mean a client "secure-
# client" could connect but another with clientid "mqtt" couldn't.
#clientid_prefixes
# Boolean value that determines whether clients that connect
# without providing a username are allowed to connect. If set to
# false then a password file should be created (see the
# password_file option) to control authenticated client access.
# Defaults to true.
#allow_anonymous true
# In addition to the clientid_prefixes, allow_anonymous and TLS
# authentication options, username based authentication is also
# possible. The default support is described in "Default
# authentication and topic access control" below. The auth_plugin
# allows another authentication method to be used.
# Specify the path to the loadable plugin and see the
# "Authentication and topic access plugin options" section below.
#auth_plugin
# -----------------------------------------------------------------
# Default authentication and topic access control
# -----------------------------------------------------------------
# Control access to the broker using a password file. This file can be
# generated using the mosquitto_passwd utility. If TLS support is not compiled
# into mosquitto (it is recommended that TLS support should be included) then
# plain text passwords are used, in which case the file should be a text file
# with lines in the format:
# username:password
# The password (and colon) may be omitted if desired, although this
# offers very little in the way of security.
#
# See the TLS client require_certificate and use_identity_as_username options
# for alternative authentication options.
password_file pwfile.example
# Access may also be controlled using a pre-shared-key file. This requires
# TLS-PSK support and a listener configured to use it. The file should be text
# lines in the format:
# identity:key
# The key should be in hexadecimal format without a leading "0x".
#psk_file
# Control access to topics on the broker using an access control list
# file. If this parameter is defined then only the topics listed will
# have access.
# If the first character of a line of the ACL file is a # it is treated as a
# comment.
# Topic access is added with lines of the format:
#
# topic [read|write] <topic>
#
# The access type is controlled using "read" or "write". This parameter
# is optional - if not given then the access is read/write.
# <topic> can contain the + or # wildcards as in subscriptions.
#
# The first set of topics are applied to anonymous clients, assuming
# allow_anonymous is true. User specific topic ACLs are added after a
# user line as follows:
#
# user <username>
#
# The username referred to here is the same as in password_file. It is
# not the clientid.
#
#
# It is also possible to define ACLs based on pattern substitution within the
# topic. The patterns available for substitution are:
#
# %c to match the client id of the client
# %u to match the username of the client
#
# The substitution pattern must be the only text for that level of hierarchy.
#
# The form is the same as for the topic keyword, but using pattern as the
# keyword.
# Pattern ACLs apply to all users even if the "user" keyword has previously
# been given.
#
# If using bridges with usernames and ACLs, connection messages can be allowed
# with the following pattern:
# pattern write $SYS/broker/connection/%c/state
#
# pattern [read|write] <topic>
#
# Example:
#
# pattern write sensor/%u/data
#
#acl_file
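# As a purely illustrative example, an ACL file using the rules above
# (hypothetical username and topics) might contain:
#
# # anonymous clients may read the broker status
# topic read $SYS/#
#
# # alice gets read/write access to her own tree
# user alice
# topic sensors/alice/#
#
# # every authenticated client may write to its own data topic
# pattern write sensors/%u/data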
# -----------------------------------------------------------------
# Authentication and topic access plugin options
# -----------------------------------------------------------------
# If the auth_plugin option above is used, define options to pass to the
# plugin here as described by the plugin instructions. All options named
# using the format auth_opt_* will be passed to the plugin, for example:
#
# auth_opt_db_host
# auth_opt_db_port
# auth_opt_db_username
# auth_opt_db_password
# =================================================================
# Bridges
# =================================================================
# A bridge is a way of connecting multiple MQTT brokers together.
# Create a new bridge using the "connection" option as described below. Set
# options for the bridges using the remaining parameters. You must specify the
# address and at least one topic to subscribe to.
# Each connection must have a unique name.
# The address line may have multiple host address and ports specified. See
# below in the round_robin description for more details on bridge behaviour if
# multiple addresses are used.
# The direction that the topic will be shared can be chosen by
# specifying out, in or both, where the default value is out.
# The QoS level of the bridged communication can be specified with the next
# topic option. The default QoS level is 0, to change the QoS the topic
# direction must also be given.
# The local and remote prefix options allow a topic to be remapped when it is
# bridged to/from the remote broker. This provides the ability to place a topic
# tree in an appropriate location.
# For more details see the mosquitto.conf man page.
# Multiple topics can be specified per connection, but be careful
# not to create any loops.
# If you are using bridges with cleansession set to false (the default), then
# you may get unexpected behaviour from incoming topics if you change what
# topics you are subscribing to. This is because the remote broker keeps the
# subscription for the old topic. If you have this problem, connect your bridge
# with cleansession set to true, then reconnect with cleansession set to false
# as normal.
#connection <name>
#address <host>[:<port>] [<host>[:<port>]]
#topic <topic> [[[out | in | both] qos-level] local-prefix remote-prefix]
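# As a purely illustrative example (hypothetical names and host), a bridge
# pushing a local sensor tree to a remote broker at QoS 1 could be:
#
# connection bridge-01
# address remote.example.com:1883
# topic sensors/# out 1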
# If the bridge has more than one address given in the address/addresses
# configuration, the round_robin option defines the behaviour of the bridge on
# a failure of the bridge connection. If round_robin is false, the default
# value, then the first address is treated as the main bridge connection. If
# the connection fails, the other secondary addresses will be attempted in
# turn. Whilst connected to a secondary bridge, the bridge will periodically
# attempt to reconnect to the main bridge until successful.
# If round_robin is true, then all addresses are treated as equals. If a
# connection fails, the next address will be tried and if successful will
# remain connected until it fails.
#round_robin false
# Set the client id for this bridge connection. If not defined,
# this defaults to 'name.hostname' where name is the connection
# name and hostname is the hostname of this computer.
#clientid
# Set the clean session variable for this bridge.
# When set to true, when the bridge disconnects for any reason, all
# messages and subscriptions will be cleaned up on the remote
# broker. Note that with cleansession set to true, there may be a
# significant amount of retained messages sent when the bridge
# reconnects after losing its connection.
# When set to false, the subscriptions and messages are kept on the
# remote broker, and delivered when the bridge reconnects.
#cleansession false
# If set to true, publish notification messages to the local and remote brokers
# giving information about the state of the bridge connection. Retained
# messages are published to the topic $SYS/broker/connection/<clientid>/state
# unless the notification_topic option is used.
# If the message is 1 then the connection is active, or 0 if the connection has
# failed.
#notifications true
# Choose the topic on which notification messages for this bridge are
# published. If not set, messages are published on the topic
# $SYS/broker/connection/<clientid>/state
#notification_topic
# Set the keepalive interval for this bridge connection, in
# seconds.
#keepalive_interval 60
# Set the start type of the bridge. This controls how the bridge starts and
# can be one of three types: automatic, lazy and once. Note that RSMB provides
# a fourth start type "manual" which isn't currently supported by mosquitto.
#
# "automatic" is the default start type and means that the bridge connection
# will be started automatically when the broker starts and also restarted
# after a short delay (30 seconds) if the connection fails.
#
# Bridges using the "lazy" start type will be started automatically when the
# number of queued messages exceeds the number set with the "threshold"
# parameter. It will be stopped automatically after the time set by the
# "idle_timeout" parameter. Use this start type if you wish the connection to
# only be active when it is needed.
#
# A bridge using the "once" start type will be started automatically when the
# broker starts but will not be restarted if the connection fails.
#start_type automatic
# Set the amount of time a bridge using the automatic start type will wait
# until attempting to reconnect. Defaults to 30 seconds.
#restart_timeout 30
# Set the amount of time a bridge using the lazy start type must be idle before
# it will be stopped. Defaults to 60 seconds.
#idle_timeout 60
# Set the number of messages that need to be queued for a bridge with lazy
# start type to be restarted. Defaults to 10 messages.
# Must be less than max_queued_messages.
#threshold 10
# If try_private is set to true, the bridge will attempt to indicate to the
# remote broker that it is a bridge, not an ordinary client. If successful, this
# means that loop detection will be more effective and that retained messages
# will be propagated correctly. Not all brokers support this feature so it may
# be necessary to set try_private to false if your bridge does not connect
# properly.
#try_private true
# Set the username to use when connecting to an MQTT v3.1 broker
# that requires authentication.
#username
# Set the password to use when connecting to an MQTT v3.1 broker
# that requires authentication. This option is only used if
# username is also set.
#password
# -----------------------------------------------------------------
# Certificate based SSL/TLS support
# -----------------------------------------------------------------
# Either bridge_cafile or bridge_capath must be defined to enable TLS support
# for this bridge.
# bridge_cafile defines the path to a file containing the
# Certificate Authority certificates that have signed the remote broker
# certificate.
# bridge_capath defines a directory that will be searched for files containing
# the CA certificates. For bridge_capath to work correctly, the certificate
# files must have ".crt" as the file ending and you must run "c_rehash <path to
# capath>" each time you add/remove a certificate.
#bridge_cafile
#bridge_capath
# Path to the PEM encoded client certificate, if required by the remote broker.
#bridge_certfile
# Path to the PEM encoded client private key, if required by the remote broker.
#bridge_keyfile
# When using certificate based encryption, bridge_insecure disables
# verification of the server hostname in the server certificate. This can be
# useful when testing initial server configurations, but makes it possible for
# a malicious third party to impersonate your server through DNS spoofing, for
# example. Use this option in testing only. If you need to resort to using this
# option in a production environment, your setup is at fault and there is no
# point using encryption.
#bridge_insecure false
# -----------------------------------------------------------------
# PSK based SSL/TLS support
# -----------------------------------------------------------------
# Pre-shared-key encryption provides an alternative to certificate based
# encryption. A bridge can be configured to use PSK with the bridge_identity
# and bridge_psk options. These are the client PSK identity, and pre-shared-key
# in hexadecimal format with no "0x". Only one of certificate and PSK based
# encryption can be used on one
# bridge at once.
#bridge_identity
#bridge_psk
# =================================================================
# External config files
# =================================================================
# External configuration files may be included by using the
# include_dir option. This defines a directory that will be searched
# for config files. All files that end in '.conf' will be loaded as
# a configuration file. It is best to have this as the last option
# in the main file. This option will only be processed from the main
# configuration file. The directory specified must not contain the
# main configuration file.
#include_dir
# =================================================================
# Unsupported rsmb options - for the future
# =================================================================
#addresses
#round_robin
# =================================================================
# rsmb options - unlikely to ever be supported
# =================================================================
#ffdc_output
#max_log_entries
#trace_level
#trace_output

View File

@@ -0,0 +1,3 @@
roger:$6$clQ4Ocu312S0qWgl$Cv2wUxgEN73c6C6jlBkswqR4AkHsvDLWvtEXZZ8NpsBLgP1WAo/qA+WXcmEN/mjDNgdUwcxRAveqNMs2xUVQYA==
sub_client:$6$U+qg0/32F0g2Fh+n$fBPSkq/rfNyEQ/TkEjRgwGTTVBpvNhKSyGShovH9KHewsvJ731tD5Zx26IHhR5RYCICt0L9qBW0/KK31UkCliw==
pub_client:$6$vxQ89y+7WrsnL2yn$fSPMmEZn9TSrC8s/jaPmxJ9NijWpkP2e7bMJLz78JXR1vW2x8+T3FZ23byJA6xs5Mt+LeOybAHwcUv0OCl40rA==

View File

@@ -0,0 +1,105 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
/*----------------------------------------------------------------------
This sample is designed to demonstrate the ability to set individual
callbacks on a per-subscription basis. There are three handlers in use:
brokerLoadHandler - $SYS/broker/load/#
brokerConnectionHandler - $SYS/broker/connection/#
brokerClientHandler - $SYS/broker/clients/#
The client will receive 100 messages total from those subscriptions,
and then print the total number of messages received from each.
It may take a few moments for the sample to complete running, as it
must wait for messages to be published.
-----------------------------------------------------------------------*/
package main

import (
	"fmt"
	"os"
	MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)

var brokerLoad = make(chan bool)
var brokerConnection = make(chan bool)
var brokerClients = make(chan bool)

func brokerLoadHandler(client *MQTT.Client, msg MQTT.Message) {
	brokerLoad <- true
	fmt.Printf("BrokerLoadHandler ")
	fmt.Printf("[%s] ", msg.Topic())
	fmt.Printf("%s\n", msg.Payload())
}

func brokerConnectionHandler(client *MQTT.Client, msg MQTT.Message) {
	brokerConnection <- true
	fmt.Printf("BrokerConnectionHandler ")
	fmt.Printf("[%s] ", msg.Topic())
	fmt.Printf("%s\n", msg.Payload())
}

func brokerClientsHandler(client *MQTT.Client, msg MQTT.Message) {
	brokerClients <- true
	fmt.Printf("BrokerClientsHandler ")
	fmt.Printf("[%s] ", msg.Topic())
	fmt.Printf("%s\n", msg.Payload())
}

func main() {
	opts := MQTT.NewClientOptions().AddBroker("tcp://iot.eclipse.org:1883").SetClientID("router-sample")
	opts.SetCleanSession(true)
	c := MQTT.NewClient(opts)
	if token := c.Connect(); token.Wait() && token.Error() != nil {
		panic(token.Error())
	}
	if token := c.Subscribe("$SYS/broker/load/#", 0, brokerLoadHandler); token.Wait() && token.Error() != nil {
		fmt.Println(token.Error())
		os.Exit(1)
	}
	if token := c.Subscribe("$SYS/broker/connection/#", 0, brokerConnectionHandler); token.Wait() && token.Error() != nil {
		fmt.Println(token.Error())
		os.Exit(1)
	}
	if token := c.Subscribe("$SYS/broker/clients/#", 0, brokerClientsHandler); token.Wait() && token.Error() != nil {
		fmt.Println(token.Error())
		os.Exit(1)
	}
	loadCount := 0
	connectionCount := 0
	clientsCount := 0
	for i := 0; i < 100; i++ {
		select {
		case <-brokerLoad:
			loadCount++
		case <-brokerConnection:
			connectionCount++
		case <-brokerClients:
			clientsCount++
		}
	}
	fmt.Printf("Received %3d Broker Load messages\n", loadCount)
	fmt.Printf("Received %3d Broker Connection messages\n", connectionCount)
	fmt.Printf("Received %3d Broker Clients messages\n", clientsCount)
	c.Disconnect(250)
}

View File

@@ -0,0 +1,130 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package main
import (
	"flag"
	"fmt"
	"os"
	MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)
/*
Options:
 [-help]                Display help
 [-a pub|sub]           Action pub (publish) or sub (subscribe)
 [-m <message>]         Payload to send
 [-n <number>]          Number of messages to send or receive
 [-q 0|1|2]             Quality of Service
 [-clean]               CleanSession (true if -clean is present)
 [-id <clientid>]       ClientID
 [-user <user>]         User
 [-password <password>] Password
 [-broker <uri>]        Broker URI
 [-topic <topic>]       Topic
 [-store <path>]        Store Directory
*/
func main() {
	topic := flag.String("topic", "", "The topic name to/from which to publish/subscribe")
	broker := flag.String("broker", "tcp://iot.eclipse.org:1883", "The broker URI. ex: tcp://10.10.1.1:1883")
	password := flag.String("password", "", "The password (optional)")
	user := flag.String("user", "", "The User (optional)")
	id := flag.String("id", "testgoid", "The ClientID (optional)")
	cleansess := flag.Bool("clean", false, "Set Clean Session (default false)")
	qos := flag.Int("qos", 0, "The Quality of Service 0,1,2 (default 0)")
	num := flag.Int("num", 1, "The number of messages to publish or subscribe (default 1)")
	payload := flag.String("message", "", "The message text to publish (default empty)")
	action := flag.String("action", "", "Action publish or subscribe (required)")
	store := flag.String("store", ":memory:", "The Store Directory (default use memory store)")
	flag.Parse()

	if *action != "pub" && *action != "sub" {
		fmt.Println("Invalid setting for -action, must be pub or sub")
		return
	}
	if *topic == "" {
		fmt.Println("Invalid setting for -topic, must not be empty")
		return
	}

	fmt.Printf("Sample Info:\n")
	fmt.Printf("\taction: %s\n", *action)
	fmt.Printf("\tbroker: %s\n", *broker)
	fmt.Printf("\tclientid: %s\n", *id)
	fmt.Printf("\tuser: %s\n", *user)
	fmt.Printf("\tpassword: %s\n", *password)
	fmt.Printf("\ttopic: %s\n", *topic)
	fmt.Printf("\tmessage: %s\n", *payload)
	fmt.Printf("\tqos: %d\n", *qos)
	fmt.Printf("\tcleansess: %v\n", *cleansess)
	fmt.Printf("\tnum: %d\n", *num)
	fmt.Printf("\tstore: %s\n", *store)

	opts := MQTT.NewClientOptions()
	opts.AddBroker(*broker)
	opts.SetClientID(*id)
	opts.SetUsername(*user)
	opts.SetPassword(*password)
	opts.SetCleanSession(*cleansess)
	if *store != ":memory:" {
		opts.SetStore(MQTT.NewFileStore(*store))
	}

	if *action == "pub" {
		client := MQTT.NewClient(opts)
		if token := client.Connect(); token.Wait() && token.Error() != nil {
			panic(token.Error())
		}
		fmt.Println("Sample Publisher Started")
		for i := 0; i < *num; i++ {
			fmt.Println("---- doing publish ----")
			token := client.Publish(*topic, byte(*qos), false, *payload)
			token.Wait()
		}
		client.Disconnect(250)
		fmt.Println("Sample Publisher Disconnected")
	} else {
		receiveCount := 0
		choke := make(chan [2]string)
		opts.SetDefaultPublishHandler(func(client *MQTT.Client, msg MQTT.Message) {
			choke <- [2]string{msg.Topic(), string(msg.Payload())}
		})
		client := MQTT.NewClient(opts)
		if token := client.Connect(); token.Wait() && token.Error() != nil {
			panic(token.Error())
		}
		if token := client.Subscribe(*topic, byte(*qos), nil); token.Wait() && token.Error() != nil {
			fmt.Println(token.Error())
			os.Exit(1)
		}
		for receiveCount < *num {
			incoming := <-choke
			fmt.Printf("RECEIVED TOPIC: %s MESSAGE: %s\n", incoming[0], incoming[1])
			receiveCount++
		}
		client.Disconnect(250)
		fmt.Println("Sample Subscriber Disconnected")
	}
}

View File

@@ -0,0 +1,150 @@
Certificate:
Data:
Version: 3 (0x2)
Serial Number: 1 (0x1)
Signature Algorithm: sha1WithRSAEncryption
Issuer: C=US, ST=Dummy, L=Dummy, O=Dummy, OU=Dummy, CN=Dummy CA
Validity
Not Before: Oct 21 19:24:23 2013 GMT
Not After : Sep 25 19:24:23 2018 GMT
Subject: C=US, ST=Dummy, L=Dummy, O=Dummy, OU=Dummy, CN=Dummy CA
Subject Public Key Info:
Public Key Algorithm: rsaEncryption
Public-Key: (2048 bit)
Modulus:
00:c2:d1:d0:31:dc:93:c3:ad:88:0d:f8:93:fe:cc:
aa:04:1d:85:aa:c3:bb:bd:87:04:f0:42:67:14:34:
4a:56:94:2b:bf:d0:6b:72:30:38:39:35:20:8c:e3:
7e:65:82:b0:7e:3e:1d:f1:18:82:b7:d6:19:59:43:
ed:81:be:eb:51:44:fc:77:9e:37:ad:e1:a0:18:b9:
4b:59:79:90:81:a4:e4:52:2f:fc:e2:ff:98:10:5e:
d5:13:9a:16:62:1a:e0:cb:ab:1d:ae:da:d1:40:d4:
97:b1:e6:e3:f1:97:2c:2a:52:73:ab:d0:a2:15:f3:
1e:9a:b0:67:d0:62:67:4b:74:b0:bb:8f:ef:9e:32:
6a:4c:27:4e:82:7c:16:66:ce:06:e9:a3:d9:36:4f:
f4:3e:bc:80:00:93:c1:ca:31:cf:03:68:d4:e5:8b:
38:45:b6:1b:35:b0:c0:e9:4a:62:75:83:01:aa:b9:
c1:0b:c0:ee:97:c0:73:23:cd:34:ec:bb:3c:95:35:
c8:2d:69:ff:86:d8:1f:c8:04:7e:18:de:62:c2:4b:
37:c6:aa:8e:03:bf:2b:0d:97:20:2a:75:47:ec:98:
29:3c:64:52:ef:91:8b:63:0f:6a:f8:c2:9d:08:6a:
61:68:6f:64:9a:56:b2:0a:bc:7b:59:3d:7f:fd:ba:
12:4b
Exponent: 65537 (0x10001)
X509v3 extensions:
X509v3 Subject Key Identifier:
5B:BB:3E:8E:2D:90:AD:AE:58:07:FF:53:00:18:98:FF:44:84:4C:BA
X509v3 Authority Key Identifier:
keyid:5B:BB:3E:8E:2D:90:AD:AE:58:07:FF:53:00:18:98:FF:44:84:4C:BA
X509v3 Basic Constraints:
CA:TRUE
Signature Algorithm: sha1WithRSAEncryption
3c:89:0b:bd:49:10:a6:1a:f6:2a:4b:5f:02:3d:ee:f3:19:4f:
c9:10:79:9c:01:ef:88:22:3d:03:5b:1a:14:46:b6:7f:9b:af:
a5:99:1a:d4:d4:9b:d6:6f:c1:fe:96:8f:9a:9e:47:42:b4:ee:
21:56:6a:c4:92:38:6c:81:cd:8e:31:43:86:7c:97:15:90:80:
d8:21:f0:46:be:2a:2f:f2:96:07:85:74:a8:fa:1b:78:8f:80:
c1:5e:bc:d9:06:c2:33:9e:8e:f9:08:dd:43:7b:6f:5a:22:67:
46:78:5d:fb:4a:4e:c2:c6:29:94:17:53:a6:c5:a9:d6:67:06:
4f:07:ef:da:5b:45:21:83:cb:31:b2:dc:dc:ac:13:19:98:3f:
98:5f:2c:b4:b4:da:d4:43:d7:a9:1a:6e:b6:cf:be:85:a8:80:
1f:8a:c1:95:8a:83:a4:af:d2:23:4a:b6:18:87:4e:28:31:36:
03:2c:bf:e4:9e:b6:75:fd:c4:68:ed:4d:d5:a8:fa:a5:81:13:
17:1c:43:67:02:1c:d0:e6:00:6e:8b:13:e6:60:1f:ba:40:78:
93:25:ca:59:5a:71:cc:58:d4:52:63:1d:b3:3c:ce:37:f1:89:
78:fc:13:fa:b3:ea:22:af:17:68:8a:a1:59:57:f5:1a:49:6e:
b9:f6:5f:b3
-----BEGIN CERTIFICATE-----
MIIDizCCAnOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJVUzEO
MAwGA1UECAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEO
MAwGA1UECwwFRHVtbXkxETAPBgNVBAMMCER1bW15IENBMB4XDTEzMTAyMTE5MjQy
M1oXDTE4MDkyNTE5MjQyM1owYDELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBUR1bW15
MQ4wDAYDVQQHDAVEdW1teTEOMAwGA1UECgwFRHVtbXkxDjAMBgNVBAsMBUR1bW15
MREwDwYDVQQDDAhEdW1teSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
ggEBAMLR0DHck8OtiA34k/7MqgQdharDu72HBPBCZxQ0SlaUK7/Qa3IwODk1IIzj
fmWCsH4+HfEYgrfWGVlD7YG+61FE/HeeN63hoBi5S1l5kIGk5FIv/OL/mBBe1ROa
FmIa4MurHa7a0UDUl7Hm4/GXLCpSc6vQohXzHpqwZ9BiZ0t0sLuP754yakwnToJ8
FmbOBumj2TZP9D68gACTwcoxzwNo1OWLOEW2GzWwwOlKYnWDAaq5wQvA7pfAcyPN
NOy7PJU1yC1p/4bYH8gEfhjeYsJLN8aqjgO/Kw2XICp1R+yYKTxkUu+Ri2MPavjC
nQhqYWhvZJpWsgq8e1k9f/26EksCAwEAAaNQME4wHQYDVR0OBBYEFFu7Po4tkK2u
WAf/UwAYmP9EhEy6MB8GA1UdIwQYMBaAFFu7Po4tkK2uWAf/UwAYmP9EhEy6MAwG
A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADyJC71JEKYa9ipLXwI97vMZ
T8kQeZwB74giPQNbGhRGtn+br6WZGtTUm9Zvwf6Wj5qeR0K07iFWasSSOGyBzY4x
Q4Z8lxWQgNgh8Ea+Ki/ylgeFdKj6G3iPgMFevNkGwjOejvkI3UN7b1oiZ0Z4XftK
TsLGKZQXU6bFqdZnBk8H79pbRSGDyzGy3NysExmYP5hfLLS02tRD16kabrbPvoWo
gB+KwZWKg6Sv0iNKthiHTigxNgMsv+SetnX9xGjtTdWo+qWBExccQ2cCHNDmAG6L
E+ZgH7pAeJMlyllaccxY1FJjHbM8zjfxiXj8E/qz6iKvF2iKoVlX9RpJbrn2X7M=
-----END CERTIFICATE-----
Certificate:
Data:
Version: 3 (0x2)
Serial Number: 1 (0x1)
Signature Algorithm: sha1WithRSAEncryption
Issuer: C=US, ST=Dummy, L=Dummy, O=Dummy, OU=Dummy, CN=Dummy CA
Validity
Not Before: Oct 21 19:24:23 2013 GMT
Not After : Sep 25 19:24:23 2018 GMT
Subject: C=US, ST=Dummy, L=Dummy, O=Dummy, OU=Dummy, CN=Dummy Intermediate CA
Subject Public Key Info:
Public Key Algorithm: rsaEncryption
Public-Key: (2048 bit)
Modulus:
00:cf:7d:92:07:a5:56:1b:6f:4c:f3:34:c2:12:c2:
34:62:3b:69:aa:a6:0c:c6:70:5b:93:bc:dc:41:98:
61:87:61:36:be:8c:08:dd:31:a9:33:76:d3:66:3e:
77:60:1e:ed:9e:e1:e5:ef:bf:17:91:ac:0c:63:07:
01:ab:30:67:bc:16:a6:2f:79:f0:61:8c:79:2d:3c:
98:60:74:61:c4:5f:60:44:85:71:92:9d:cc:7b:14:
39:74:aa:44:f9:9f:ae:f6:c7:8d:c3:01:47:53:24:
ac:7b:a2:f6:c5:7d:65:37:40:0b:20:c8:d4:14:cd:
f8:f4:57:ea:23:70:f4:e3:99:2b:1c:9a:67:37:ed:
93:c7:a7:7c:86:90:f7:ae:fc:6f:4b:18:dc:d5:eb:
f3:68:33:d6:78:14:d1:ca:a7:06:7d:75:34:f6:c0:
d4:15:1b:21:2b:78:d9:76:24:a5:f0:c6:13:c8:1e:
4a:c8:ca:77:34:4e:f8:fa:49:5f:6c:e1:66:a8:65:
f0:8c:bc:44:20:03:ac:af:4a:61:a5:39:48:51:1b:
cb:d8:22:29:60:27:47:42:fc:bf:6a:77:65:58:09:
20:82:1c:d1:16:5e:5a:18:ea:99:61:8e:93:94:27:
30:20:dd:44:03:50:43:b4:ec:a3:0f:ee:91:69:d7:
b1:5b
Exponent: 65537 (0x10001)
X509v3 extensions:
X509v3 Basic Constraints:
CA:TRUE
Signature Algorithm: sha1WithRSAEncryption
39:a0:8d:2f:68:22:1d:4f:3e:db:f1:9b:29:20:77:23:f8:21:
34:17:84:00:88:a8:3e:a1:4d:84:94:90:96:02:e6:6a:b4:20:
51:a0:66:20:38:05:18:aa:2a:3e:9a:50:60:af:eb:4a:70:ac:
9b:59:30:d5:17:14:9c:b4:91:6a:1b:c3:45:8a:dd:cd:2f:c6:
c5:8c:fe:d0:76:20:63:a4:97:db:e3:2a:8e:c1:3d:c8:b6:06:
2d:49:7a:d9:8a:de:16:ea:5d:5f:fb:41:79:0d:8f:d2:23:00:
d9:b9:6f:93:45:bb:74:17:ea:6b:72:13:01:86:fe:8d:7e:8f:
27:71:76:a9:37:6d:6c:90:5a:3f:d9:6d:4d:6c:a4:64:7a:ea:
82:c9:87:ee:6a:d0:6e:30:05:7f:19:1d:19:31:a9:9a:ce:21:
84:da:47:c7:a0:66:12:e8:7e:57:69:5d:9c:24:e5:46:3c:bf:
37:f6:88:c3:b1:42:de:3b:81:ed:f5:ae:e2:23:9e:c2:89:a1:
e7:5c:1d:49:0f:ed:ae:55:60:0e:4e:4c:e9:8a:64:e6:ae:c5:
d1:99:a7:70:4c:7e:5d:53:ac:88:2c:0f:0b:21:94:1a:32:f9:
a1:cc:1e:67:98:6b:b6:e9:b1:b9:4b:46:02:b1:65:c9:49:83:
80:bd:b9:70
-----BEGIN CERTIFICATE-----
MIIDWDCCAkCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJVUzEO
MAwGA1UECAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEO
MAwGA1UECwwFRHVtbXkxETAPBgNVBAMMCER1bW15IENBMB4XDTEzMTAyMTE5MjQy
M1oXDTE4MDkyNTE5MjQyM1owbTELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBUR1bW15
MQ4wDAYDVQQHDAVEdW1teTEOMAwGA1UECgwFRHVtbXkxDjAMBgNVBAsMBUR1bW15
MR4wHAYDVQQDDBVEdW1teSBJbnRlcm1lZGlhdGUgQ0EwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQDPfZIHpVYbb0zzNMISwjRiO2mqpgzGcFuTvNxBmGGH
YTa+jAjdMakzdtNmPndgHu2e4eXvvxeRrAxjBwGrMGe8FqYvefBhjHktPJhgdGHE
X2BEhXGSncx7FDl0qkT5n672x43DAUdTJKx7ovbFfWU3QAsgyNQUzfj0V+ojcPTj
mSscmmc37ZPHp3yGkPeu/G9LGNzV6/NoM9Z4FNHKpwZ9dTT2wNQVGyEreNl2JKXw
xhPIHkrIync0Tvj6SV9s4WaoZfCMvEQgA6yvSmGlOUhRG8vYIilgJ0dC/L9qd2VY
CSCCHNEWXloY6plhjpOUJzAg3UQDUEO07KMP7pFp17FbAgMBAAGjEDAOMAwGA1Ud
EwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADmgjS9oIh1PPtvxmykgdyP4ITQX
hACIqD6hTYSUkJYC5mq0IFGgZiA4BRiqKj6aUGCv60pwrJtZMNUXFJy0kWobw0WK
3c0vxsWM/tB2IGOkl9vjKo7BPci2Bi1JetmK3hbqXV/7QXkNj9IjANm5b5NFu3QX
6mtyEwGG/o1+jydxdqk3bWyQWj/ZbU1spGR66oLJh+5q0G4wBX8ZHRkxqZrOIYTa
R8egZhLofldpXZwk5UY8vzf2iMOxQt47ge31ruIjnsKJoedcHUkP7a5VYA5OTOmK
ZOauxdGZp3BMfl1TrIgsDwshlBoy+aHMHmeYa7bpsblLRgKxZclJg4C9uXA=
-----END CERTIFICATE-----


@@ -0,0 +1,9 @@
Certificate structure:

Root CA
 |
 |-> Intermediate CA
       |
       |-> Server
       |
       |-> Client


@@ -0,0 +1,20 @@
-----BEGIN CERTIFICATE-----
MIIDRzCCAi8CAQIwDQYJKoZIhvcNAQEFBQAwbTELMAkGA1UEBhMCVVMxDjAMBgNV
BAgMBUR1bW15MQ4wDAYDVQQHDAVEdW1teTEOMAwGA1UECgwFRHVtbXkxDjAMBgNV
BAsMBUR1bW15MR4wHAYDVQQDDBVEdW1teSBJbnRlcm1lZGlhdGUgQ0EwHhcNMTMx
MDIxMTkyNDIzWhcNMTgwOTI1MTkyNDIzWjBmMQswCQYDVQQGEwJVUzEOMAwGA1UE
CAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEOMAwGA1UE
CwwFRHVtbXkxFzAVBgNVBAMMDkR1bW15IChjbGllbnQpMIIBIjANBgkqhkiG9w0B
AQEFAAOCAQ8AMIIBCgKCAQEA4J/+eKqsjK0QS+cSDa5Fh4XM4THy812JkWMySA5r
bFxHZ5ye36/IuwyRQ0Yn2DvhhsDR5K7yz8K+Yfp9A6WRBkyHK/sy/8vurQHeDH3y
lLtHCLi5nfyt2fDxWOYwFrS1giGn2IxJIHBAWu4cBODCkqwqAp92+Lqp3Sn+D+Fb
maHEU3LHua8OIJiSeAIHo/jPqfHFqZxK1bXhGCSQKvUZCaTftsqDtn+LZSElqj1y
5/cnc7XGsTf8ml/+FDMX1aSAHf+pu+UAp9JqOXOM60A5JIpYu3Lsejp1RppyPJYP
zC4nSN8R2LOdDChP2MB7f1/sXRGlLM/X3Vi4X+c6xQ85TQIDAQABMA0GCSqGSIb3
DQEBBQUAA4IBAQAMWt9qMUOY5z1uyYcjUnconPHLM9MADCZI2sRbfdBOBHEnTVKv
Y63SWnCt8TRJb01LKLIEys6pW1NUlxr6b+FwicNmycR0L8b63cmNXg2NmSZsnK9C
fGT6BbbDdVPYjvmghpSd3soBGBLPsJvaFc6UL5tunm+hT7PxWjDxHZEiE18PTs05
Vpp/ytILzhoXvJeFOWQHIdf4DLR5izGMNTKdQzgg1eBq2vKgjJIlEZ3j/AyHkJLE
qFip1tyc0PRzgKYFLWttaZzakCLJOGuxtvYB+GrixVM7U23p5LQbLE0KX7fe2Gql
xKMfSID5NUDNf1SuSrrGLD3gfnJEKVB8TVBk
-----END CERTIFICATE-----


@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA4J/+eKqsjK0QS+cSDa5Fh4XM4THy812JkWMySA5rbFxHZ5ye
36/IuwyRQ0Yn2DvhhsDR5K7yz8K+Yfp9A6WRBkyHK/sy/8vurQHeDH3ylLtHCLi5
nfyt2fDxWOYwFrS1giGn2IxJIHBAWu4cBODCkqwqAp92+Lqp3Sn+D+FbmaHEU3LH
ua8OIJiSeAIHo/jPqfHFqZxK1bXhGCSQKvUZCaTftsqDtn+LZSElqj1y5/cnc7XG
sTf8ml/+FDMX1aSAHf+pu+UAp9JqOXOM60A5JIpYu3Lsejp1RppyPJYPzC4nSN8R
2LOdDChP2MB7f1/sXRGlLM/X3Vi4X+c6xQ85TQIDAQABAoIBABosCiZdHIW3lHKD
leLqL0e/G0QR4dDhUSoTeMRUiceyaM91vD0r6iOBL1u7TOEw+PIOfWY7zCbQ9gXM
fcxy+hbVy9ogBq0vQbv+v7SM6DrUJ06o11fFHSyLmlNVXr0GiS+EZF4i2lJhQd5W
aAVZetJEJRDxK5eHiEswnV2UUGvx6VCpFILL0JVGxWY7oOPxiiBLl+cmfRZdTfGx
46VzQvBu7N8hGpCIsljuVFP/DxR7c+2oyrtFaFSMZBMNI8fICgkb2QeLk/XUBXtn
0bDttgmOP/BvnNAor7nIRoeer/7kbXc9jOsgXwnvDKPapltQddL+exycXzbIjLuY
Z2SFsDECgYEA+2A4QGV0biqdICAoKCHCHCU/CrdDUQiQDHqRU6/nhka7MFPSl4Wy
9oISRrYZhKIbSbaXwTW5ZcYq8Hpn/yGYIWlINP9sjprnOWPE7L74lac+PFWXNMUI
jNJOJkLK1IeppByXAt5ekGBrG556bhzRCJsTjYsyUR/r/bMEF1FD8WMCgYEA5MHM
hqmkDK5CbklVaPonNc251Lx+HSzzQ40WExC/PrCczRaZMKlhmyKZfWJCInQsUDln
w6Lqa5UnwZV2HYAF30VZYQsq84ulNnx1/36BEZyIimfAL1WHvKeGWjGsZqniXxxb
Os5wEMAvxk0SWVrR5v6YpBDv3t9+lLg/bzBOAY8CgYEAuZ0q7CH9/vroWrhj7n4+
3pmCG1+HDWbNNumqNalFxBimT+EVN1058FvLMvtzjERG8f8pvzj0VPom6rr336Pm
uYUMFFYmyoYHBpFs74Nz+s0rX1Gz/PsgfRstKYNYUeZ6lPunZi7clK8dZ591t6j/
kOMxZOrLlKuFjieJdc5D5RECgYAVTzxXOwxOJhmIHoq3Sb5HU8/A0oJJA3vxyf3J
buDx3Q/uRvGkR9MQ2YtE09dnUD0kiARzhASkWvOmI98p5lglsVcfJCQvJc4RIkz3
rPgnBNbvVbTgc+4+E7j/Q+tUcPTmeUTCWKK13MFWjq1r53rwMr1TY0SFFXq8LeGy
4OQTXwKBgQDCuPN3Q+EJusYy7TXt0WicY/xyu15s1216N7PmRKFr/WAn2JdAfjbD
JKDwVqo0AQiEDAobJk0JMPs+ENK2d58GsybCK4QGAh6z5FGunb5T432YfnoXtL3J
ZKVvkf7eowvokTIeiDf3XrCPajLDBpo88Xax+RH03US7XRdu/fVzMA==
-----END RSA PRIVATE KEY-----


@@ -0,0 +1,20 @@
-----BEGIN CERTIFICATE-----
MIIDWDCCAkCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJVUzEO
MAwGA1UECAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEO
MAwGA1UECwwFRHVtbXkxETAPBgNVBAMMCER1bW15IENBMB4XDTEzMTAyMTE5MjQy
M1oXDTE4MDkyNTE5MjQyM1owbTELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBUR1bW15
MQ4wDAYDVQQHDAVEdW1teTEOMAwGA1UECgwFRHVtbXkxDjAMBgNVBAsMBUR1bW15
MR4wHAYDVQQDDBVEdW1teSBJbnRlcm1lZGlhdGUgQ0EwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQDPfZIHpVYbb0zzNMISwjRiO2mqpgzGcFuTvNxBmGGH
YTa+jAjdMakzdtNmPndgHu2e4eXvvxeRrAxjBwGrMGe8FqYvefBhjHktPJhgdGHE
X2BEhXGSncx7FDl0qkT5n672x43DAUdTJKx7ovbFfWU3QAsgyNQUzfj0V+ojcPTj
mSscmmc37ZPHp3yGkPeu/G9LGNzV6/NoM9Z4FNHKpwZ9dTT2wNQVGyEreNl2JKXw
xhPIHkrIync0Tvj6SV9s4WaoZfCMvEQgA6yvSmGlOUhRG8vYIilgJ0dC/L9qd2VY
CSCCHNEWXloY6plhjpOUJzAg3UQDUEO07KMP7pFp17FbAgMBAAGjEDAOMAwGA1Ud
EwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADmgjS9oIh1PPtvxmykgdyP4ITQX
hACIqD6hTYSUkJYC5mq0IFGgZiA4BRiqKj6aUGCv60pwrJtZMNUXFJy0kWobw0WK
3c0vxsWM/tB2IGOkl9vjKo7BPci2Bi1JetmK3hbqXV/7QXkNj9IjANm5b5NFu3QX
6mtyEwGG/o1+jydxdqk3bWyQWj/ZbU1spGR66oLJh+5q0G4wBX8ZHRkxqZrOIYTa
R8egZhLofldpXZwk5UY8vzf2iMOxQt47ge31ruIjnsKJoedcHUkP7a5VYA5OTOmK
ZOauxdGZp3BMfl1TrIgsDwshlBoy+aHMHmeYa7bpsblLRgKxZclJg4C9uXA=
-----END CERTIFICATE-----


@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAz32SB6VWG29M8zTCEsI0YjtpqqYMxnBbk7zcQZhhh2E2vowI
3TGpM3bTZj53YB7tnuHl778XkawMYwcBqzBnvBamL3nwYYx5LTyYYHRhxF9gRIVx
kp3MexQ5dKpE+Z+u9seNwwFHUySse6L2xX1lN0ALIMjUFM349FfqI3D045krHJpn
N+2Tx6d8hpD3rvxvSxjc1evzaDPWeBTRyqcGfXU09sDUFRshK3jZdiSl8MYTyB5K
yMp3NE74+klfbOFmqGXwjLxEIAOsr0phpTlIURvL2CIpYCdHQvy/andlWAkgghzR
Fl5aGOqZYY6TlCcwIN1EA1BDtOyjD+6RadexWwIDAQABAoIBAEs6OsS85DBENUEE
QszsTnPDGLd/Rqh3uiwhUDYUGmAsFd4WBWy1AaSgE1tBkKRv8jUlr+kxfkkZeNA6
jRdVEHc4Ov6Blm63sIN/Mbve1keNUOjm/NtsjOOe3In45dMfWx8sELC/+O0jIcod
tpy5rwXOGXrEdWgpmXZ1nXVGEfOmQH3eGEPkqbY1I4YlAoXD0mc5fNQQrn7qrogH
M5USCnC44yIIF0Yube2Fg0Cem41vzIvENAlZC273gyW+pQwez0uma2LaCWmkEz1N
sESrNSQ4yeQnDQYlgX2w3RRpqql4GDzAdISL2WJcNhW6KJ72B0SQ1ny/TmQgZePG
Ojv1T0ECgYEA9CXqKyXBSPF+Wdc/fNagrIi6tcNkLAN2/p5J3Z6TtbZGjItoMlDX
c+hwHobcI3GZLMlxlBx7ePc7cKgaMDXrl8BZZjFoyEV9OHOLicfNkLFmBIZ14gtX
bGZYDuCcal46r7IKRjT8lcYWCoLJnI9vLEII7Q7P/eBgcntw3+h/ziECgYEA2ZAa
bp9d0xBaOXq/E341guxNG49R09/DeZ/2CEM+V1pMD8OVH9cvxrBdDLUmAnrqeGTh
Djoi1UEbOVAV6/dXbTQHrla+HF4Uq+t9tV+mt68TEa54PQ/ERt5ih3nZGBiqZ6rX
SGeyZmIXMLIZEs2dIbJ2DmLcZj6Tjxkd/PxPt/sCgYBGczZaEv/uK3k5NWplfI1K
m/28e1BJfwp0OHq6D4sx8RH0djmv4zH4iUbpGCMnuxznFo3Gnl1mr3igbnF4HecI
mAF0AqfoulyC0JygOl5v9TCp957Ghl1Is1OPn3KjIuOuVSKv1ZRZJ5qul8TTf3Qm
AjwPI6oS6Q8LmeEdSzqt4QKBgB5MglHboe5t/ZK5tHibgApOrGJlMEkohYmfrFz0
OG9j5OnhHBiGGGI8V4kYhUWdJqBDtFAN6qH2Yjs2Gwd0t9k+gL9X1zwOIiTbM/OZ
cZdtK2Ov/5DJbFVOTTx+zKwda0Xqtfagcmjtyjr+4p0Kw5JYzzYrsHQQzO4F2nZM
ETIXAoGADskTzhgpPrC5/qfuLY4gBUtCfYIb8kaKN90AT8A/14lBrT4lSnmsEvKP
tRDmFjnc/ogDlHa5SRDijtT6UoyQPuauAt6DYrJ8G6qKJqiMwJcuLV1XFks7z1J8
VzB8kso1pPAtcvVXBPklsjvZ10NdQOCqm4N3EVp69agbB1oco4I=
-----END RSA PRIVATE KEY-----


@@ -0,0 +1,18 @@
-----BEGIN CERTIFICATE-----
MIIC8DCCAlmgAwIBAgIJAOD63PlXjJi8MA0GCSqGSIb3DQEBBQUAMIGQMQswCQYD
VQQGEwJHQjEXMBUGA1UECAwOVW5pdGVkIEtpbmdkb20xDjAMBgNVBAcMBURlcmJ5
MRIwEAYDVQQKDAlNb3NxdWl0dG8xCzAJBgNVBAsMAkNBMRYwFAYDVQQDDA1tb3Nx
dWl0dG8ub3JnMR8wHQYJKoZIhvcNAQkBFhByb2dlckBhdGNob28ub3JnMB4XDTEy
MDYyOTIyMTE1OVoXDTIyMDYyNzIyMTE1OVowgZAxCzAJBgNVBAYTAkdCMRcwFQYD
VQQIDA5Vbml0ZWQgS2luZ2RvbTEOMAwGA1UEBwwFRGVyYnkxEjAQBgNVBAoMCU1v
c3F1aXR0bzELMAkGA1UECwwCQ0ExFjAUBgNVBAMMDW1vc3F1aXR0by5vcmcxHzAd
BgkqhkiG9w0BCQEWEHJvZ2VyQGF0Y2hvby5vcmcwgZ8wDQYJKoZIhvcNAQEBBQAD
gY0AMIGJAoGBAMYkLmX7SqOT/jJCZoQ1NWdCrr/pq47m3xxyXcI+FLEmwbE3R9vM
rE6sRbP2S89pfrCt7iuITXPKycpUcIU0mtcT1OqxGBV2lb6RaOT2gC5pxyGaFJ+h
A+GIbdYKO3JprPxSBoRponZJvDGEZuM3N7p3S/lRoi7G5wG5mvUmaE5RAgMBAAGj
UDBOMB0GA1UdDgQWBBTad2QneVztIPQzRRGj6ZHKqJTv5jAfBgNVHSMEGDAWgBTa
d2QneVztIPQzRRGj6ZHKqJTv5jAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUA
A4GBAAqw1rK4NlRUCUBLhEFUQasjP7xfFqlVbE2cRy0Rs4o3KS0JwzQVBwG85xge
REyPOFdGdhBY2P1FNRy0MDr6xr+D2ZOwxs63dG1nnAnWZg7qwoLgpZ4fESPD3PkA
1ZgKJc2zbSQ9fCPxt2W3mdVav66c6fsb7els2W2Iz7gERJSX
-----END CERTIFICATE-----


@@ -0,0 +1,21 @@
-----BEGIN CERTIFICATE-----
MIIDizCCAnOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJVUzEO
MAwGA1UECAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEO
MAwGA1UECwwFRHVtbXkxETAPBgNVBAMMCER1bW15IENBMB4XDTEzMTAyMTE5MjQy
M1oXDTE4MDkyNTE5MjQyM1owYDELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBUR1bW15
MQ4wDAYDVQQHDAVEdW1teTEOMAwGA1UECgwFRHVtbXkxDjAMBgNVBAsMBUR1bW15
MREwDwYDVQQDDAhEdW1teSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
ggEBAMLR0DHck8OtiA34k/7MqgQdharDu72HBPBCZxQ0SlaUK7/Qa3IwODk1IIzj
fmWCsH4+HfEYgrfWGVlD7YG+61FE/HeeN63hoBi5S1l5kIGk5FIv/OL/mBBe1ROa
FmIa4MurHa7a0UDUl7Hm4/GXLCpSc6vQohXzHpqwZ9BiZ0t0sLuP754yakwnToJ8
FmbOBumj2TZP9D68gACTwcoxzwNo1OWLOEW2GzWwwOlKYnWDAaq5wQvA7pfAcyPN
NOy7PJU1yC1p/4bYH8gEfhjeYsJLN8aqjgO/Kw2XICp1R+yYKTxkUu+Ri2MPavjC
nQhqYWhvZJpWsgq8e1k9f/26EksCAwEAAaNQME4wHQYDVR0OBBYEFFu7Po4tkK2u
WAf/UwAYmP9EhEy6MB8GA1UdIwQYMBaAFFu7Po4tkK2uWAf/UwAYmP9EhEy6MAwG
A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADyJC71JEKYa9ipLXwI97vMZ
T8kQeZwB74giPQNbGhRGtn+br6WZGtTUm9Zvwf6Wj5qeR0K07iFWasSSOGyBzY4x
Q4Z8lxWQgNgh8Ea+Ki/ylgeFdKj6G3iPgMFevNkGwjOejvkI3UN7b1oiZ0Z4XftK
TsLGKZQXU6bFqdZnBk8H79pbRSGDyzGy3NysExmYP5hfLLS02tRD16kabrbPvoWo
gB+KwZWKg6Sv0iNKthiHTigxNgMsv+SetnX9xGjtTdWo+qWBExccQ2cCHNDmAG6L
E+ZgH7pAeJMlyllaccxY1FJjHbM8zjfxiXj8E/qz6iKvF2iKoVlX9RpJbrn2X7M=
-----END CERTIFICATE-----


@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAwtHQMdyTw62IDfiT/syqBB2FqsO7vYcE8EJnFDRKVpQrv9Br
cjA4OTUgjON+ZYKwfj4d8RiCt9YZWUPtgb7rUUT8d543reGgGLlLWXmQgaTkUi/8
4v+YEF7VE5oWYhrgy6sdrtrRQNSXsebj8ZcsKlJzq9CiFfMemrBn0GJnS3Swu4/v
njJqTCdOgnwWZs4G6aPZNk/0PryAAJPByjHPA2jU5Ys4RbYbNbDA6UpidYMBqrnB
C8Dul8BzI8007Ls8lTXILWn/htgfyAR+GN5iwks3xqqOA78rDZcgKnVH7JgpPGRS
75GLYw9q+MKdCGphaG9kmlayCrx7WT1//boSSwIDAQABAoIBAGphOzge5Cjzdtl6
JQX7J9M7c6O9YaSqN44iFDs6GmWQXxtMaX9eyTSjx/RmvLwdUtZ8gMkHw0kzBYBy
0RwJ7mDgNKP0px6xl0Qo2fYvpTLFoU8nmQUy4AwAXIVpnFNRrfJIq9qw7ZZi/7pL
A6kGDT3G7Bajw/4MVWfOb8GgGhte1ZhZgXFEZNjGkhwi3Na1/6slOQIfnkkhco0X
ru1Cw82nXNPHqu6K+pbHP9ucYdUNZWRh+yQS3p92lr5tB3/IL/lD0Cl3+xP8JFl+
5NMSISOKGb3ld0rzrJd1ncgLgv/XlHu8DqvcFs9QwXbaUlG0U/0GrorGYqFaZYaH
R1rkZjECgYEA9mAarVAeL7IOeEIg28f/qyp//5+pMzRpVhnI+xscHB5QUO9WH+uE
nOXwcGvcRME134H4o/0j75aMhVs7sGfMOQ+enAwOxRC5h4MCClDSWysWftU8Ihhf
Sm6eZ0kYLZNqXt/TxTs124NiF1Bb5pekzEr9fTj//vP4meuAQ/D0JoUCgYEAym4f
BCm5tLwYYxZM4tko0g9BHxy4aAPfyshuLed1JjkK4JCFp368GBoknj5rUNewTun2
1zkQF9b5Mi3k5qWkboP5rpp7DuG3PJdWypV6b/btUeqcyG1gteQwTAwebfqeM0vH
QvpuAoRMtEcSBQBl2s9zgmObXUpDlLwuIlL+to8CgYEAyJBtxx8Mo9k4jE+Q/jnu
+QFtF8R68jM9eRkeksR7+qv2yBw+KVgKKcvKE0rLErGS0LO2nJELexQ8qqcdjTrC
dsUvYmsybtxxnE5bD9jBlfQaqP+fp0Xd9PLeQsivRRLXqgpeFBZifqOS69XAKpTS
VHjLqPAI/hzQCUU8spJpvx0CgYAePgt2NMGgxcUi8I72CRl3IH5LJqBKMeH6Sq1j
QEQZPMZqPE0rc9yoASfdWFfyEPcvIvcUulq0JRK/s2mSJ8cEF8Vyl3OxCnm0nKuD
woczOQHFjjZ0HxsmsXuhsOHO7nU6FqUjVYSf7aIEAOYpRyDwarPIFBd+/XxROTfv
OtUA8wKBgAOiGXRxycb4rAtJBDqPAgdAAwNgvQHyVgn32ArWtgu8ermuZW5h1y45
hULFvCbLSCpo+I7QhRhw4y2DoB1DgIw04BeFUIcE+az7HH3euAyCLQ0caaA8Xk/6
bpPfUMe1SNi51f345QlOPvvwGllTC6DeBhZ730k7VNB32dOCV3kE
-----END RSA PRIVATE KEY-----


@@ -0,0 +1,21 @@
-----BEGIN CERTIFICATE-----
MIIDYTCCAkmgAwIBAgIBATANBgkqhkiG9w0BAQUFADBtMQswCQYDVQQGEwJVUzEO
MAwGA1UECAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEO
MAwGA1UECwwFRHVtbXkxHjAcBgNVBAMMFUR1bW15IEludGVybWVkaWF0ZSBDQTAe
Fw0xMzEwMjExOTI0MjNaFw0xODA5MjUxOTI0MjNaMGYxCzAJBgNVBAYTAlVTMQ4w
DAYDVQQIDAVEdW1teTEOMAwGA1UEBwwFRHVtbXkxDjAMBgNVBAoMBUR1bW15MQ4w
DAYDVQQLDAVEdW1teTEXMBUGA1UEAwwORHVtbXkgKHNlcnZlcikwggEiMA0GCSqG
SIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0fQCRUWXt+i7JMR55Zuo6wBRxG7RnPutN
2L7J/18io52vxjm8AZDiC0JFkCHh72ZzvbgVA+e+WxAIYfioRis4JWw4jK8v5m8q
cZzS0GJNTMROPiZQi7A81tAbrV00XN7d5PsmIJ2Bf4XbJWMy31CsmoFloeRMd7bR
LxwDIb0qqRawhKsWdfZB/c9wGKmHlei50B7PXk+koKnVdsLwXxtCZDvc/3fNRHEK
lZs4m0N05G38FdrnczPm/0pie87nK9rnklL7u1sYOukOznnOtW5h7+A4M+DxzME0
HRU6k4d+6QvukxBlsE93gHhwRsejIuDGlqD+DRxk2PdmmgsmPH59AgMBAAGjEzAR
MA8GA1UdEQQIMAaHBAoKBOQwDQYJKoZIhvcNAQEFBQADggEBAJ3bKs2b4cAJWTZj
69dMEfYZKcQIXs7euwtKlP7H8m5c+X5KmZPi1Puq4Z0gtvLu/z7J9UjZjG0CoylV
q15Zp5svryJ7XzcsZs7rwyo1JtngW1z54wr9MezqIOF2w12dTwEAINFsW7TxAsH7
bfqkzZjuCbbsww5q4eHuZp0yaMHc3hOGaUot27OTlxlIMhv7VBBqWAj0jmvAfTKf
la0SiL/Mc8rD8D5C0SXGcCL6li/kqtinAxzhokuyyPf+hQX35kcZxEPu6WxtYVLv
hMzrokOZP2FrGbCnhaNT8gw4Aa0RXV1JgonRWYSbkeaCzvr2bJ0OuJiDdwdRKvOo
raKLlfY=
-----END CERTIFICATE-----


@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAtH0AkVFl7fouyTEeeWbqOsAUcRu0Zz7rTdi+yf9fIqOdr8Y5
vAGQ4gtCRZAh4e9mc724FQPnvlsQCGH4qEYrOCVsOIyvL+ZvKnGc0tBiTUzETj4m
UIuwPNbQG61dNFze3eT7JiCdgX+F2yVjMt9QrJqBZaHkTHe20S8cAyG9KqkWsISr
FnX2Qf3PcBiph5XoudAez15PpKCp1XbC8F8bQmQ73P93zURxCpWbOJtDdORt/BXa
53Mz5v9KYnvO5yva55JS+7tbGDrpDs55zrVuYe/gODPg8czBNB0VOpOHfukL7pMQ
ZbBPd4B4cEbHoyLgxpag/g0cZNj3ZpoLJjx+fQIDAQABAoIBAG0UfxtUTn4dDdma
TgihIj6Ph8s0Kzua0yshK215YU3WBJ8O9iWh7KYwl8Ti7xdVUF3y8yYATjbFYlMu
otFQVx5/v4ANxnL0mYrVTyo5tq9xDdMbzJwxUDn0uaGAjSvwVOFWWlMYsxhoscVY
OzOrs14dosaBqTBtyZdzGULrSSBWPCBlucRcvTV/eZwgYrYJ3bG66ZTfdc930KPj
nfkWrsAWmPz8irHoWQ2OX+ZJTprVYRYIZXqpFn3zuwmhpJkZUVULMMk6LFBKDmBT
F2+b4h49P+oNJ+6CRoOERHYq2k1MmYBcu1z8lMjdfRGUDdK4vS9pcqhBXJJg1vU9
APRtfiECgYEA6Y3LqQJLkUI0w6g/9T+XyzUoi0aUfH6PT81XnGYqJxTBHinZvgML
mF3qtZ0bHGwEoAsyhSgDkeCawE/E7Phd+B6aku2QMVm8GHygZg0Pbao4cxXv+CF3
i1Lo7n3zY0kTVrjsvDRsDDESmRK4Ea48fJwOfUEtfG6VDtwmZAe8chcCgYEAxdWd
sWcc45ARi2vY6yb5Ysgt/g0z26KyQydF+GMWIz1FDfUxXJ/axdCovd3VIHDvItJE
n9LjFiobkyOKX99ou1foWwsmhn11duVrF7hsVrE0nsbd4RX3sTbqXa9x3GN/ujFr
0xHUTmiXt3Qyn/076jBiLGnbtzSxJ/IZIEI9VIsCgYEAketHnTaT5BOLR9ss6ptq
yUlTJYFZcFbaTy+qV0r1dyleZuwa4L6iVfYHmKSptZ4/XYbhb5RKdq/vv8uW679Z
ZpYoWTgX6N15yYrD5D6wrwG09yJzpYGzYNbSNX93u0aC0KIFNqlCAHQAfKbXXiSQ
IgKWgudf9ehZNMmTKtgygs0CgYAoTV9Fr7Lj7QqV84+KQDNX2137PmdNHDTil1Ka
ylzNKwMxV70JmIsx91MY8uMjK76bwmg2gvi+IC/j5r6ez11/pOXx/jCH/3D5mr0Z
ZPm1I36LxgmXfCkskfpmwYIZmq9/l+fWZPByVL5roiFaFHWrPNYTJDGdff+FGr3h
o3zpBwKBgDY1sih/nY+6rwOP+DcabGK9KFFKLXsoJrXobEniLxp7oFaGN2GkmKvN
NajCs5pr3wfb4LrVrsNvERnUsUXWg6ReLqfWbT4bmjzE2iJ3IbtVQ5M4kl6YrbdZ
PMgWoLCqnoo8NoGBtmVMWhaXNJvVZPgZHk33T5F0Cg6PKNdHDchH
-----END RSA PRIVATE KEY-----


@@ -0,0 +1,51 @@
package main
import (
"fmt"
"os"
"time"
MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)
var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) {
fmt.Printf("TOPIC: %s\n", msg.Topic())
fmt.Printf("MSG: %s\n", msg.Payload())
}
var onConnect MQTT.OnConnectHandler = func(client *MQTT.Client) {
fmt.Println("onConnect")
if token := client.Subscribe("shirou@github/#", 0, nil); token.Wait() && token.Error() != nil {
fmt.Println(token.Error())
os.Exit(1)
}
}
var subscribed = "#"
func main() {
opts := MQTT.NewClientOptions().AddBroker("tcp://lite.mqtt.shiguredo.jp:1883")
opts.SetDefaultPublishHandler(f)
opts.SetOnConnectHandler(onConnect)
opts.SetCleanSession(true)
opts.SetUsername("shirou@github")
opts.SetPassword("8Ub6F68kfYlr7RoV")
c := MQTT.NewClient(opts)
if token := c.Connect(); token.Wait() && token.Error() != nil {
panic(token.Error())
}
qos := 0
retain := false
payload := "sanple"
topic := "shirou@github/log"
token := c.Publish(topic, byte(qos), retain, payload)
token.Wait()
fmt.Printf("%v\n", token.Error())
for {
time.Sleep(1 * time.Second)
}
}


@@ -0,0 +1,35 @@
package main
import (
"fmt"
"os"
"time"
MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)
var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) {
fmt.Printf("TOPIC: %s\n", msg.Topic())
fmt.Printf("MSG: %s\n", msg.Payload())
}
var subscribed = "#"
func main() {
opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883")
opts.SetDefaultPublishHandler(f)
opts.SetCleanSession(true)
c := MQTT.NewClient(opts)
if token := c.Connect(); token.Wait() && token.Error() != nil {
panic(token.Error())
}
if token := c.Subscribe("#", 0, nil); token.Wait() && token.Error() != nil {
fmt.Println(token.Error())
os.Exit(1)
}
for {
time.Sleep(1 * time.Second)
}
}


@@ -0,0 +1,42 @@
package main
import (
"fmt"
"os"
"time"
MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)
var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) {
fmt.Printf("TOPIC: %s\n", msg.Topic())
fmt.Printf("MSG: %s\n", msg.Payload())
}
var onConnect MQTT.OnConnectHandler = func(client *MQTT.Client) {
fmt.Println("onConnect")
if token := client.Subscribe("#", 0, nil); token.Wait() && token.Error() != nil {
fmt.Println(token.Error())
os.Exit(1)
}
}
var subscribed = "#"
func main() {
// opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883")
opts := MQTT.NewClientOptions().AddBroker("tcp://lite.mqtt.shiguredo.jp:1883")
opts.SetDefaultPublishHandler(f)
opts.SetOnConnectHandler(onConnect)
opts.SetCleanSession(true)
opts.SetUsername("shirou@github.com")
c := MQTT.NewClient(opts)
if token := c.Connect(); token.Wait() && token.Error() != nil {
panic(token.Error())
}
for {
time.Sleep(1 * time.Second)
}
}


@@ -0,0 +1,130 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package main
import (
"flag"
"fmt"
"os"
MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)
/*
Options:
[-help]                Display help
[-action pub|sub]      Action publish or subscribe (required)
[-message <message>]   Payload to send
[-num <number>]        Number of messages to send or receive
[-qos 0|1|2]           Quality of Service
[-clean]               CleanSession (true if -clean is present)
[-id <clientid>]       ClientID
[-user <user>]         User
[-password <password>] Password
[-broker <uri>]        Broker URI
[-topic <topic>]       Topic
[-store <path>]        Store Directory
*/
func main() {
topic := flag.String("topic", "", "The topic name to/from which to publish/subscribe")
broker := flag.String("broker", "tcp://iot.eclipse.org:1883", "The broker URI. ex: tcp://10.10.1.1:1883")
password := flag.String("password", "", "The password (optional)")
user := flag.String("user", "", "The User (optional)")
id := flag.String("id", "testgoid", "The ClientID (optional)")
cleansess := flag.Bool("clean", false, "Set Clean Session (default false)")
qos := flag.Int("qos", 0, "The Quality of Service 0,1,2 (default 0)")
num := flag.Int("num", 1, "The number of messages to publish or subscribe (default 1)")
payload := flag.String("message", "", "The message text to publish (default empty)")
action := flag.String("action", "", "Action publish or subscribe (required)")
store := flag.String("store", ":memory:", "The Store Directory (default use memory store)")
flag.Parse()
if *action != "pub" && *action != "sub" {
fmt.Println("Invalid setting for -action, must be pub or sub")
return
}
if *topic == "" {
fmt.Println("Invalid setting for -topic, must not be empty")
return
}
fmt.Printf("Sample Info:\n")
fmt.Printf("\taction: %s\n", *action)
fmt.Printf("\tbroker: %s\n", *broker)
fmt.Printf("\tclientid: %s\n", *id)
fmt.Printf("\tuser: %s\n", *user)
fmt.Printf("\tpassword: %s\n", *password)
fmt.Printf("\ttopic: %s\n", *topic)
fmt.Printf("\tmessage: %s\n", *payload)
fmt.Printf("\tqos: %d\n", *qos)
fmt.Printf("\tcleansess: %v\n", *cleansess)
fmt.Printf("\tnum: %d\n", *num)
fmt.Printf("\tstore: %s\n", *store)
opts := MQTT.NewClientOptions()
opts.AddBroker(*broker)
opts.SetClientID(*id)
opts.SetUsername(*user)
opts.SetPassword(*password)
opts.SetCleanSession(*cleansess)
if *store != ":memory:" {
opts.SetStore(MQTT.NewFileStore(*store))
}
if *action == "pub" {
client := MQTT.NewClient(opts)
if token := client.Connect(); token.Wait() && token.Error() != nil {
panic(token.Error())
}
fmt.Println("Sample Publisher Started")
for i := 0; i < *num; i++ {
fmt.Println("---- doing publish ----")
token := client.Publish(*topic, byte(*qos), false, *payload)
token.Wait()
}
client.Disconnect(250)
fmt.Println("Sample Publisher Disconnected")
} else {
receiveCount := 0
choke := make(chan [2]string)
opts.SetDefaultPublishHandler(func(client *MQTT.Client, msg MQTT.Message) {
choke <- [2]string{msg.Topic(), string(msg.Payload())}
})
client := MQTT.NewClient(opts)
if token := client.Connect(); token.Wait() && token.Error() != nil {
panic(token.Error())
}
if token := client.Subscribe(*topic, byte(*qos), nil); token.Wait() && token.Error() != nil {
fmt.Println(token.Error())
os.Exit(1)
}
for receiveCount < *num {
incoming := <-choke
fmt.Printf("RECEIVED TOPIC: %s MESSAGE: %s\n", incoming[0], incoming[1])
receiveCount++
}
client.Disconnect(250)
fmt.Println("Sample Subscriber Disconnected")
}
}


@@ -0,0 +1,58 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package main
import (
"fmt"
"os"
"time"
MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)
var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) {
fmt.Printf("TOPIC: %s\n", msg.Topic())
fmt.Printf("MSG: %s\n", msg.Payload())
}
func main() {
opts := MQTT.NewClientOptions().AddBroker("tcp://iot.eclipse.org:1883").SetClientID("gotrivial")
opts.SetDefaultPublishHandler(f)
c := MQTT.NewClient(opts)
if token := c.Connect(); token.Wait() && token.Error() != nil {
panic(token.Error())
}
if token := c.Subscribe("/go-mqtt/sample", 0, nil); token.Wait() && token.Error() != nil {
fmt.Println(token.Error())
os.Exit(1)
}
for i := 0; i < 5; i++ {
text := fmt.Sprintf("this is msg #%d!", i)
token := c.Publish("/go-mqtt/sample", 0, false, text)
token.Wait()
}
time.Sleep(3 * time.Second)
if token := c.Unsubscribe("/go-mqtt/sample"); token.Wait() && token.Error() != nil {
fmt.Println(token.Error())
os.Exit(1)
}
c.Disconnect(250)
}


@@ -0,0 +1,123 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
/*
To run this sample, the following certificates
must be created:
rootCA-crt.pem - root certificate authority that is used
to sign and verify the client and server
certificates.
rootCA-key.pem - keyfile for the rootCA.
server-crt.pem - server certificate signed by the CA.
server-key.pem - keyfile for the server certificate.
client-crt.pem - client certificate signed by the CA.
client-key.pem - keyfile for the client certificate.
CAfile.pem - file containing concatenated CA certificates
if there is more than 1 in the chain.
(e.g. root CA -> intermediate CA -> server cert)
Instead of creating CAfile.pem, rootCA-crt.pem can be added
to the default openssl CA certificate bundle. To find the
default CA bundle used, check:
$GOROOT/src/pkg/crypto/x509/root_unix.go
To use this CA bundle, just set tls.Config.RootCAs = nil.
*/
package main
import "io/ioutil"
import "fmt"
import "time"
import "crypto/tls"
import "crypto/x509"
import MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
func NewTLSConfig() *tls.Config {
// Import trusted certificates from CAfile.pem.
// Alternatively, manually add CA certificates to
// default openssl CA bundle.
certpool := x509.NewCertPool()
pemCerts, err := ioutil.ReadFile("samplecerts/CAfile.pem")
if err == nil {
certpool.AppendCertsFromPEM(pemCerts)
}
// Import client certificate/key pair
cert, err := tls.LoadX509KeyPair("samplecerts/client-crt.pem", "samplecerts/client-key.pem")
if err != nil {
panic(err)
}
// Just to print out the client certificate..
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
if err != nil {
panic(err)
}
fmt.Println(cert.Leaf)
// Create tls.Config with desired tls properties
return &tls.Config{
// RootCAs = certs used to verify server cert.
RootCAs: certpool,
// ClientAuth = whether to request cert from server.
// Since the server is set up for SSL, this happens
// anyways.
ClientAuth: tls.NoClientCert,
// ClientCAs = certs used to validate client cert.
ClientCAs: nil,
// InsecureSkipVerify = skip verification that the cert
// contents match the server (hostname, IP etc.).
// True here, so the server certificate is NOT verified.
InsecureSkipVerify: true,
// Certificates = list of certs client sends to server.
Certificates: []tls.Certificate{cert},
}
}
var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) {
fmt.Printf("TOPIC: %s\n", msg.Topic())
fmt.Printf("MSG: %s\n", msg.Payload())
}
func main() {
tlsconfig := NewTLSConfig()
opts := MQTT.NewClientOptions()
opts.AddBroker("ssl://iot.eclipse.org:8883")
opts.SetClientID("ssl-sample").SetTLSConfig(tlsconfig)
opts.SetDefaultPublishHandler(f)
// Start the connection
c := MQTT.NewClient(opts)
if token := c.Connect(); token.Wait() && token.Error() != nil {
panic(token.Error())
}
c.Subscribe("/go-mqtt/sample", 0, nil)
i := 0
for range time.Tick(time.Duration(1) * time.Second) {
if i == 5 {
break
}
text := fmt.Sprintf("this is msg #%d!", i)
c.Publish("/go-mqtt/sample", 0, false, text)
i++
}
c.Disconnect(250)
}
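
The sample above deliberately sets InsecureSkipVerify: true, so the broker's certificate chain and hostname are never checked. For contrast, here is a minimal verifying sketch; it assumes the same samplecerts/CAfile.pem used above and a broker certificate whose subject matches the host being dialled.

package main

import (
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "io/ioutil"
)

// newVerifyingTLSConfig builds a tls.Config that actually verifies the
// broker: the CA pool is loaded from CAfile.pem and InsecureSkipVerify is
// left at its default (false), so both chain and hostname are checked.
func newVerifyingTLSConfig() (*tls.Config, error) {
    pemCerts, err := ioutil.ReadFile("samplecerts/CAfile.pem")
    if err != nil {
        return nil, err
    }
    certpool := x509.NewCertPool()
    if !certpool.AppendCertsFromPEM(pemCerts) {
        return nil, fmt.Errorf("no CA certificates parsed from CAfile.pem")
    }
    return &tls.Config{RootCAs: certpool}, nil
}

func main() {
    if _, err := newVerifyingTLSConfig(); err != nil {
        panic(err)
    }
    fmt.Println("TLS config built; pass it to ClientOptions.SetTLSConfig")
}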


@@ -0,0 +1,70 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package main
import (
"bufio"
"crypto/tls"
"flag"
"fmt"
"io"
//"log"
"os"
"strconv"
"time"
)
import MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
func main() {
//MQTT.DEBUG = log.New(os.Stdout, "", 0)
//MQTT.ERROR = log.New(os.Stdout, "", 0)
stdin := bufio.NewReader(os.Stdin)
hostname, _ := os.Hostname()
server := flag.String("server", "tcp://127.0.0.1:1883", "The full URL of the MQTT server to connect to")
topic := flag.String("topic", hostname, "Topic to publish the messages on")
qos := flag.Int("qos", 0, "The QoS to send the messages at")
retained := flag.Bool("retained", false, "Are the messages sent with the retained flag")
clientid := flag.String("clientid", hostname+strconv.Itoa(time.Now().Second()), "A clientid for the connection")
username := flag.String("username", "", "A username to authenticate to the MQTT server")
password := flag.String("password", "", "Password to match username")
flag.Parse()
connOpts := MQTT.NewClientOptions().AddBroker(*server).SetClientID(*clientid).SetCleanSession(true)
if *username != "" {
connOpts.SetUsername(*username)
if *password != "" {
connOpts.SetPassword(*password)
}
}
tlsConfig := &tls.Config{InsecureSkipVerify: true, ClientAuth: tls.NoClientCert}
connOpts.SetTLSConfig(tlsConfig)
client := MQTT.NewClient(connOpts)
if token := client.Connect(); token.Wait() && token.Error() != nil {
fmt.Println(token.Error())
return
}
fmt.Printf("Connected to %s\n", *server)
for {
message, err := stdin.ReadString('\n')
if err == io.EOF {
os.Exit(0)
}
client.Publish(*topic, byte(*qos), *retained, message)
}
}


@@ -0,0 +1,85 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package main
import (
"crypto/tls"
"flag"
"fmt"
//"log"
"os"
"os/signal"
"strconv"
"syscall"
"time"
MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)
func onMessageReceived(client *MQTT.Client, message MQTT.Message) {
fmt.Printf("Received message on topic: %s\nMessage: %s\n", message.Topic(), message.Payload())
}
var i int64
func main() {
//MQTT.DEBUG = log.New(os.Stdout, "", 0)
//MQTT.ERROR = log.New(os.Stdout, "", 0)
c := make(chan os.Signal, 1)
i = 0
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func() {
<-c
fmt.Println("signal received, exiting")
os.Exit(0)
}()
hostname, _ := os.Hostname()
server := flag.String("server", "tcp://127.0.0.1:1883", "The full url of the MQTT server to connect to ex: tcp://127.0.0.1:1883")
topic := flag.String("topic", "#", "Topic to subscribe to")
qos := flag.Int("qos", 0, "The QoS to subscribe to messages at")
clientid := flag.String("clientid", hostname+strconv.Itoa(time.Now().Second()), "A clientid for the connection")
username := flag.String("username", "", "A username to authenticate to the MQTT server")
password := flag.String("password", "", "Password to match username")
flag.Parse()
connOpts := &MQTT.ClientOptions{
ClientID: *clientid,
CleanSession: true,
Username: *username,
Password: *password,
MaxReconnectInterval: 1 * time.Second,
KeepAlive: 30 * time.Second,
TLSConfig: tls.Config{InsecureSkipVerify: true, ClientAuth: tls.NoClientCert},
}
connOpts.AddBroker(*server)
connOpts.OnConnect = func(c *MQTT.Client) {
if token := c.Subscribe(*topic, byte(*qos), onMessageReceived); token.Wait() && token.Error() != nil {
panic(token.Error())
}
}
client := MQTT.NewClient(connOpts)
if token := client.Connect(); token.Wait() && token.Error() != nil {
panic(token.Error())
} else {
fmt.Printf("Connected to %s\n", *server)
}
for {
time.Sleep(1 * time.Second)
}
}


@@ -0,0 +1,125 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt
import (
"fmt"
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
"strconv"
)
const (
inboundPrefix = "i."
outboundPrefix = "o."
)
// Store is an interface which can be used to provide implementations
// for message persistence.
// Because we may have to store distinct messages with the same
// message ID, we need a unique key for each message. This is
// possible by prepending "i." or "o." to each message id
type Store interface {
Open()
Put(string, packets.ControlPacket)
Get(string) packets.ControlPacket
All() []string
Del(string)
Close()
Reset()
}
// A key MUST have the form "X.[messageid]"
// where X is 'i' or 'o'
func mIDFromKey(key string) uint16 {
s := key[2:]
i, err := strconv.Atoi(s)
chkerr(err)
return uint16(i)
}
// Return a string of the form "i.[id]"
func inboundKeyFromMID(id uint16) string {
return fmt.Sprintf("%s%d", inboundPrefix, id)
}
// Return a string of the form "o.[id]"
func outboundKeyFromMID(id uint16) string {
return fmt.Sprintf("%s%d", outboundPrefix, id)
}
// govern which outgoing messages are persisted
func persistOutbound(s Store, m packets.ControlPacket) {
switch m.Details().Qos {
case 0:
switch m.(type) {
case *packets.PubackPacket, *packets.PubcompPacket:
// Sending puback. delete matching publish
// from ibound
s.Del(inboundKeyFromMID(m.Details().MessageID))
}
case 1:
switch m.(type) {
case *packets.PublishPacket, *packets.PubrelPacket, *packets.SubscribePacket, *packets.UnsubscribePacket:
// Sending publish. store in obound
// until puback received
s.Put(outboundKeyFromMID(m.Details().MessageID), m)
default:
chkcond(false)
}
case 2:
switch m.(type) {
case *packets.PublishPacket:
// Sending publish. store in obound
// until pubrel received
s.Put(outboundKeyFromMID(m.Details().MessageID), m)
default:
chkcond(false)
}
}
}
// govern which incoming messages are persisted
func persistInbound(s Store, m packets.ControlPacket) {
switch m.Details().Qos {
case 0:
switch m.(type) {
case *packets.PubackPacket, *packets.SubackPacket, *packets.UnsubackPacket, *packets.PubcompPacket:
// Received a puback. delete matching publish
// from obound
s.Del(outboundKeyFromMID(m.Details().MessageID))
case *packets.PublishPacket, *packets.PubrecPacket, *packets.PingrespPacket, *packets.ConnackPacket:
default:
chkcond(false)
}
case 1:
switch m.(type) {
case *packets.PublishPacket, *packets.PubrelPacket:
// Received a publish. store it in ibound
// until puback sent
s.Put(inboundKeyFromMID(m.Details().MessageID), m)
default:
chkcond(false)
}
case 2:
switch m.(type) {
case *packets.PublishPacket:
// Received a publish. store it in ibound
// until pubrel received
s.Put(inboundKeyFromMID(m.Details().MessageID), m)
default:
chkcond(false)
}
}
}
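
As a sketch of what satisfies the Store interface above, here is a minimal, mutex-guarded in-memory implementation. It is illustrative only (not the library's own memory store); the import paths simply mirror the ones used throughout this change set.

package main

import (
    "fmt"
    "sync"

    MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
    "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
)

// toyStore is an illustrative in-memory message store, keyed by the
// "i.[messageid]" / "o.[messageid]" scheme described above.
type toyStore struct {
    mu   sync.RWMutex
    msgs map[string]packets.ControlPacket
}

var _ MQTT.Store = (*toyStore)(nil) // compile-time interface check

func (s *toyStore) Open()  { s.msgs = make(map[string]packets.ControlPacket) }
func (s *toyStore) Close() {}
func (s *toyStore) Reset() { s.Open() }

func (s *toyStore) Put(key string, m packets.ControlPacket) {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.msgs[key] = m
}

func (s *toyStore) Get(key string) packets.ControlPacket {
    s.mu.RLock()
    defer s.mu.RUnlock()
    return s.msgs[key]
}

func (s *toyStore) Del(key string) {
    s.mu.Lock()
    defer s.mu.Unlock()
    delete(s.msgs, key)
}

func (s *toyStore) All() []string {
    s.mu.RLock()
    defer s.mu.RUnlock()
    keys := make([]string, 0, len(s.msgs))
    for k := range s.msgs {
        keys = append(keys, k)
    }
    return keys
}

func main() {
    s := &toyStore{}
    s.Open()
    pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
    pub.MessageID = 7
    s.Put("o.7", pub) // outbound key, following the "o.[messageid]" scheme
    fmt.Println(s.All())
}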


@@ -0,0 +1,156 @@
/*
* Copyright (c) 2014 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Allan Stockdill-Mander
*/
package mqtt
import (
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
"sync"
"time"
)
//PacketAndToken is a struct that contains both a ControlPacket and a
//Token. This struct is passed via channels between the client interface
//code and the underlying code responsible for sending and receiving
//MQTT messages.
type PacketAndToken struct {
p packets.ControlPacket
t Token
}
//Token defines the interface for the tokens used to indicate when
//actions have completed.
type Token interface {
Wait() bool
WaitTimeout(time.Duration) bool
flowComplete()
Error() error
}
type baseToken struct {
m sync.RWMutex
complete chan struct{}
ready bool
err error
}
// Wait will wait indefinitely for the Token to complete, i.e. for the
// Publish to be sent and its receipt confirmed by the broker
func (b *baseToken) Wait() bool {
b.m.Lock()
defer b.m.Unlock()
if !b.ready {
<-b.complete
b.ready = true
}
return b.ready
}
// WaitTimeout takes a maximum duration to wait for the flow associated
// with the Token to complete; it returns true if the flow completed before
// the timeout and false if the timeout occurred. In the case of a timeout
// the Token does not have an error set, in case the caller wishes to wait again
func (b *baseToken) WaitTimeout(d time.Duration) bool {
b.m.Lock()
defer b.m.Unlock()
if !b.ready {
select {
case <-b.complete:
b.ready = true
case <-time.After(d):
}
}
return b.ready
}
func (b *baseToken) flowComplete() {
close(b.complete)
}
func (b *baseToken) Error() error {
b.m.RLock()
defer b.m.RUnlock()
return b.err
}
func newToken(tType byte) Token {
switch tType {
case packets.Connect:
return &ConnectToken{baseToken: baseToken{complete: make(chan struct{})}}
case packets.Subscribe:
return &SubscribeToken{baseToken: baseToken{complete: make(chan struct{})}, subResult: make(map[string]byte)}
case packets.Publish:
return &PublishToken{baseToken: baseToken{complete: make(chan struct{})}}
case packets.Unsubscribe:
return &UnsubscribeToken{baseToken: baseToken{complete: make(chan struct{})}}
case packets.Disconnect:
return &DisconnectToken{baseToken: baseToken{complete: make(chan struct{})}}
}
return nil
}
//ConnectToken is an extension of Token containing the extra fields
//required to provide information about calls to Connect()
type ConnectToken struct {
baseToken
returnCode byte
}
//ReturnCode returns the acknowledgement code in the connack sent
//in response to a Connect()
func (c *ConnectToken) ReturnCode() byte {
c.m.RLock()
defer c.m.RUnlock()
return c.returnCode
}
//PublishToken is an extension of Token containing the extra fields
//required to provide information about calls to Publish()
type PublishToken struct {
baseToken
messageID uint16
}
//MessageID returns the MQTT message ID that was assigned to the
//Publish packet when it was sent to the broker
func (p *PublishToken) MessageID() uint16 {
return p.messageID
}
//SubscribeToken is an extension of Token containing the extra fields
//required to provide information about calls to Subscribe()
type SubscribeToken struct {
baseToken
subs []string
subResult map[string]byte
}
//Result returns a map of topics that were subscribed to along with
//the matching return code from the broker. This is either the Qos
//value of the subscription or an error code.
func (s *SubscribeToken) Result() map[string]byte {
s.m.RLock()
defer s.m.RUnlock()
return s.subResult
}
//UnsubscribeToken is an extension of Token containing the extra fields
//required to provide information about calls to Unsubscribe()
type UnsubscribeToken struct {
baseToken
}
//DisconnectToken is an extension of Token containing the extra fields
//required to provide information about calls to Disconnect()
type DisconnectToken struct {
baseToken
}
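
A brief usage sketch for the token types above: rather than blocking forever on Wait, a caller can bound the wait with WaitTimeout and decide what to do on expiry. The broker address and topic below are illustrative placeholders.

package main

import (
    "fmt"
    "time"

    MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)

func main() {
    opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883").SetClientID("token-sample")
    c := MQTT.NewClient(opts)
    if token := c.Connect(); token.Wait() && token.Error() != nil {
        panic(token.Error())
    }
    token := c.Publish("go-mqtt/tokens", 1, false, "hello")
    // Give the broker two seconds to acknowledge the QoS 1 publish.
    if !token.WaitTimeout(2 * time.Second) {
        fmt.Println("publish not acknowledged within 2s; no error is set, so we could wait again")
    } else if err := token.Error(); err != nil {
        fmt.Println("publish failed:", err)
    }
    c.Disconnect(250)
}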


@@ -0,0 +1,82 @@
/*
* Copyright (c) 2014 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt
import (
"errors"
"strings"
)
//ErrInvalidQos is the error returned when a packet is to be sent
//with an invalid Qos value
var ErrInvalidQos = errors.New("Invalid QoS")
//ErrInvalidTopicEmptyString is the error returned when a zero-length
//topic string is passed in
var ErrInvalidTopicEmptyString = errors.New("Invalid Topic; empty string")
//ErrInvalidTopicMultilevel is the error returned when a topic string
//is passed in that has the multi-level wildcard in any position but
//the last
var ErrInvalidTopicMultilevel = errors.New("Invalid Topic; multi-level wildcard must be last level")
// Topic Names and Topic Filters
// The MQTT v3.1.1 spec clarifies a number of ambiguities with regard
// to the validity of Topic strings.
// - A Topic must be between 1 and 65535 bytes.
// - A Topic is case sensitive.
// - A Topic may contain whitespace.
// - A Topic containing a leading forward slash is different than a Topic without.
// - A Topic may be "/" (two levels, both empty string).
// - A Topic must be UTF-8 encoded.
// - A Topic may contain any number of levels.
// - A Topic may contain an empty level (two forward slashes in a row).
// - A TopicName may not contain a wildcard.
// - A TopicFilter may only have a # (multi-level) wildcard as the last level.
// - A TopicFilter may contain any number of + (single-level) wildcards.
// - A TopicFilter with a # will match the absence of a level
// Example: a subscription to "foo/#" will match messages published to "foo".
func validateSubscribeMap(subs map[string]byte) ([]string, []byte, error) {
var topics []string
var qoss []byte
for topic, qos := range subs {
if err := validateTopicAndQos(topic, qos); err != nil {
return nil, nil, err
}
topics = append(topics, topic)
qoss = append(qoss, qos)
}
return topics, qoss, nil
}
func validateTopicAndQos(topic string, qos byte) error {
if len(topic) == 0 {
return ErrInvalidTopicEmptyString
}
levels := strings.Split(topic, "/")
for i, level := range levels {
if level == "#" && i != len(levels)-1 {
return ErrInvalidTopicMultilevel
}
}
if qos < 0 || qos > 2 {
return ErrInvalidQos
}
return nil
}
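
Since validateTopicAndQos and the Err* values live in package mqtt, the rules in the comment block above can be pinned down with a table-driven test placed alongside this file. A sketch (the test name and cases are illustrative):

package mqtt

import "testing"

func Test_validateTopicAndQos_sketch(t *testing.T) {
    cases := []struct {
        topic string
        qos   byte
        err   error
    }{
        {"a/b/c", 0, nil},                       // plain topic
        {"a/+/c", 1, nil},                       // single-level wildcard anywhere
        {"a/#", 2, nil},                         // multi-level wildcard, last level
        {"a/#/c", 0, ErrInvalidTopicMultilevel}, // # must be the last level
        {"", 0, ErrInvalidTopicEmptyString},     // empty topic string
        {"a", 3, ErrInvalidQos},                 // QoS must be 0, 1 or 2
    }
    for _, c := range cases {
        if err := validateTopicAndQos(c.topic, c.qos); err != c.err {
            t.Errorf("validateTopicAndQos(%q, %d) = %v, want %v", c.topic, c.qos, err, c.err)
        }
    }
}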


@@ -0,0 +1,36 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt
import (
"io/ioutil"
"log"
)
// Internal levels of library output that are initialised to discard
// everything, but can be overridden by the programmer
var (
ERROR *log.Logger
CRITICAL *log.Logger
WARN *log.Logger
DEBUG *log.Logger
)
func init() {
ERROR = log.New(ioutil.Discard, "", 0)
CRITICAL = log.New(ioutil.Discard, "", 0)
WARN = log.New(ioutil.Discard, "", 0)
DEBUG = log.New(ioutil.Discard, "", 0)
}
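
Because ERROR, CRITICAL, WARN and DEBUG are exported package variables, an application enables output simply by replacing them, as the commented-out lines in the samples above hint. A minimal sketch:

package main

import (
    "log"
    "os"

    MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)

func main() {
    // Route warnings and errors to stderr; DEBUG stays discarded.
    MQTT.WARN = log.New(os.Stderr, "WARN ", log.Ltime)
    MQTT.ERROR = log.New(os.Stderr, "ERROR ", log.Ltime)
    // ...construct and use the client as in the samples above...
}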


@@ -0,0 +1,56 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt
import (
"log"
"net/http"
"os"
"testing"
_ "net/http/pprof"
)
func init() {
DEBUG = log.New(os.Stderr, "DEBUG ", log.Ltime)
WARN = log.New(os.Stderr, "WARNING ", log.Ltime)
CRITICAL = log.New(os.Stderr, "CRITICAL ", log.Ltime)
ERROR = log.New(os.Stderr, "ERROR ", log.Ltime)
go func() {
log.Println(http.ListenAndServe("localhost:6060", nil))
}()
}
func Test_NewClient_simple(t *testing.T) {
ops := NewClientOptions().SetClientID("foo").AddBroker("tcp://10.10.0.1:1883")
c := NewClient(ops)
if c == nil {
t.Fatalf("ops is nil")
}
if c.options.ClientID != "foo" {
t.Fatalf("bad client id")
}
if c.options.Servers[0].Scheme != "tcp" {
t.Fatalf("bad server scheme")
}
if c.options.Servers[0].Host != "10.10.0.1:1883" {
t.Fatalf("bad server host")
}
}


@@ -0,0 +1,111 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt
import (
"fmt"
"testing"
"time"
)
type DummyToken struct{}
func (d *DummyToken) Wait() bool {
return true
}
func (d *DummyToken) WaitTimeout(t time.Duration) bool {
return true
}
func (d *DummyToken) flowComplete() {}
func (d *DummyToken) Error() error {
return nil
}
func Test_getID(t *testing.T) {
mids := &messageIds{index: make(map[uint16]Token)}
i1 := mids.getID(&DummyToken{})
if i1 != 1 {
t.Fatalf("i1 was wrong: %v", i1)
}
i2 := mids.getID(&DummyToken{})
if i2 != 2 {
t.Fatalf("i2 was wrong: %v", i2)
}
for i := uint16(3); i < 100; i++ {
id := mids.getID(&DummyToken{})
if id != i {
t.Fatalf("id was wrong expected %v got %v", i, id)
}
}
}
func Test_freeID(t *testing.T) {
mids := &messageIds{index: make(map[uint16]Token)}
i1 := mids.getID(&DummyToken{})
mids.freeID(i1)
if i1 != 1 {
t.Fatalf("i1 was wrong: %v", i1)
}
i2 := mids.getID(&DummyToken{})
fmt.Printf("i2: %v\n", i2)
}
func Test_messageids_mix(t *testing.T) {
mids := &messageIds{index: make(map[uint16]Token)}
done := make(chan bool)
a := make(chan uint16, 3)
b := make(chan uint16, 20)
c := make(chan uint16, 100)
go func() {
for i := 0; i < 10000; i++ {
a <- mids.getID(&DummyToken{})
mids.freeID(<-b)
}
done <- true
}()
go func() {
for i := 0; i < 10000; i++ {
b <- mids.getID(&DummyToken{})
mids.freeID(<-c)
}
done <- true
}()
go func() {
for i := 0; i < 10000; i++ {
c <- mids.getID(&DummyToken{})
mids.freeID(<-a)
}
done <- true
}()
<-done
<-done
<-done
}


@@ -0,0 +1,126 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt
import (
"crypto/tls"
"crypto/x509"
"testing"
"time"
)
func Test_NewClientOptions_default(t *testing.T) {
o := NewClientOptions()
if o.ClientID != "" {
t.Fatalf("bad default client id")
}
if o.Username != "" {
t.Fatalf("bad default username")
}
if o.Password != "" {
t.Fatalf("bad default password")
}
if o.KeepAlive != 30*time.Second {
t.Fatalf("bad default timeout")
}
}
func Test_NewClientOptions_mix(t *testing.T) {
o := NewClientOptions()
o.AddBroker("tcp://192.168.1.2:9999")
o.SetClientID("myclientid")
o.SetUsername("myuser")
o.SetPassword("mypassword")
o.SetKeepAlive(88)
if o.Servers[0].Scheme != "tcp" {
t.Fatalf("bad scheme")
}
if o.Servers[0].Host != "192.168.1.2:9999" {
t.Fatalf("bad host")
}
if o.ClientID != "myclientid" {
t.Fatalf("bad set clientid")
}
if o.Username != "myuser" {
t.Fatalf("bad set username")
}
if o.Password != "mypassword" {
t.Fatalf("bad set password")
}
if o.KeepAlive != 88 {
t.Fatalf("bad set timeout")
}
}
func Test_ModifyOptions(t *testing.T) {
o := NewClientOptions()
o.AddBroker("tcp://3.3.3.3:12345")
c := NewClient(o)
o.AddBroker("ws://2.2.2.2:9999")
o.SetOrderMatters(false)
if c.options.Servers[0].Scheme != "tcp" {
t.Fatalf("client options.server.Scheme was modified")
}
// if c.options.server.Host != "2.2.2.2:9999" {
// t.Fatalf("client options.server.Host was modified")
// }
if o.Order != false {
t.Fatalf("options.order was not modified")
}
}
func Test_TLSConfig(t *testing.T) {
o := NewClientOptions().SetTLSConfig(&tls.Config{
RootCAs: x509.NewCertPool(),
ClientAuth: tls.NoClientCert,
ClientCAs: x509.NewCertPool(),
InsecureSkipVerify: true})
c := NewClient(o)
if c.options.TLSConfig.ClientAuth != tls.NoClientCert {
t.Fatalf("client options.tlsConfig ClientAuth incorrect")
}
if c.options.TLSConfig.InsecureSkipVerify != true {
t.Fatalf("client options.tlsConfig InsecureSkipVerify incorrect")
}
}
func Test_OnConnectionLost(t *testing.T) {
onconnlost := func(client *Client, err error) {
panic(err)
}
o := NewClientOptions().SetConnectionLostHandler(onconnlost)
c := NewClient(o)
if c.options.OnConnectionLost == nil {
t.Fatalf("client options.onconnlost was nil")
}
}
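
The tests above rely on the setters returning *ClientOptions, which is what lets calls chain as in the samples (NewClientOptions().AddBroker(...).SetClientID(...)). A fluent-construction sketch, assuming all setters chain the same way and with broker address and credentials as placeholders:

package main

import (
    MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)

func main() {
    // Each setter returns the options value, so configuration reads
    // as a single fluent chain.
    opts := MQTT.NewClientOptions().
        AddBroker("tcp://localhost:1883").
        SetClientID("fluent-sample").
        SetUsername("user").
        SetPassword("pass").
        SetCleanSession(true)
    _ = MQTT.NewClient(opts)
}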


@@ -0,0 +1,62 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt
import (
"bytes"
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
"testing"
)
func Test_NewPingReqMessage(t *testing.T) {
pr := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket)
if pr.MessageType != packets.Pingreq {
t.Errorf("NewPingReqMessage bad msg type: %v", pr.MessageType)
}
if pr.RemainingLength != 0 {
t.Errorf("NewPingReqMessage bad remlen, expected 0, got %d", pr.RemainingLength)
}
exp := []byte{
0xC0,
0x00,
}
var buf bytes.Buffer
pr.Write(&buf)
bs := buf.Bytes()
if len(bs) != 2 {
t.Errorf("NewPingReqMessage.Bytes() wrong length: %d", len(bs))
}
if exp[0] != bs[0] || exp[1] != bs[1] {
t.Errorf("NewPingMessage.Bytes() wrong")
}
}
func Test_DecodeMessage_pingresp(t *testing.T) {
bs := bytes.NewBuffer([]byte{
0xD0,
0x00,
})
presp, _ := packets.ReadPacket(bs)
if presp.(*packets.PingrespPacket).MessageType != packets.Pingresp {
t.Errorf("DecodeMessage ping response wrong msg type: %v", presp.(*packets.PingrespPacket).MessageType)
}
if presp.(*packets.PingrespPacket).RemainingLength != 0 {
t.Errorf("DecodeMessage ping response wrong rem len: %d", presp.(*packets.PingrespPacket).RemainingLength)
}
}


@@ -0,0 +1,287 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt
import (
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
"testing"
)
func Test_newRouter(t *testing.T) {
router, stop := newRouter()
if router == nil {
t.Fatalf("router is nil")
}
if stop == nil {
t.Fatalf("stop is nil")
}
if router.routes.Len() != 0 {
t.Fatalf("router.routes was not empty")
}
}
func Test_AddRoute(t *testing.T) {
router, _ := newRouter()
calledback := false
cb := func(client *Client, msg Message) {
calledback = true
}
router.addRoute("/alpha", cb)
if router.routes.Len() != 1 {
t.Fatalf("router.routes was wrong")
}
}
func Test_Match(t *testing.T) {
router, _ := newRouter()
router.addRoute("/alpha", nil)
if !router.routes.Front().Value.(*route).match("/alpha") {
t.Fatalf("match function is bad")
}
if router.routes.Front().Value.(*route).match("alpha") {
t.Fatalf("match function is bad")
}
}
func Test_match(t *testing.T) {
check := func(route, topic string, exp bool) {
result := routeIncludesTopic(route, topic)
if exp != result {
t.Errorf("match was bad R: %v, T: %v, EXP: %v", route, topic, exp)
}
}
// ** Basic **
R := ""
T := ""
check(R, T, true)
R = "x"
T = ""
check(R, T, false)
R = ""
T = "x"
check(R, T, false)
R = "x"
T = "x"
check(R, T, true)
R = "x"
T = "X"
check(R, T, false)
R = "alpha"
T = "alpha"
check(R, T, true)
R = "alpha"
T = "beta"
check(R, T, false)
// ** / **
R = "/"
T = "/"
check(R, T, true)
R = "/one"
T = "/one"
check(R, T, true)
R = "/"
T = "/two"
check(R, T, false)
R = "/two"
T = "/"
check(R, T, false)
R = "/two"
T = "two"
check(R, T, false) // a leading "/" creates a different topic
R = "/a/"
T = "/a"
check(R, T, false)
R = "/a/"
T = "/a/b"
check(R, T, false)
R = "/a/b"
T = "/a/b"
check(R, T, true)
R = "/a/b/"
T = "/a/b"
check(R, T, false)
R = "/a/b"
T = "/R/b"
check(R, T, false)
// ** + **
R = "/a/+/c"
T = "/a/b/c"
check(R, T, true)
R = "/+/b/c"
T = "/a/b/c"
check(R, T, true)
R = "/a/b/+"
T = "/a/b/c"
check(R, T, true)
R = "/a/+/+"
T = "/a/b/c"
check(R, T, true)
R = "/+/+/+"
T = "/a/b/c"
check(R, T, true)
R = "/+/+/c"
T = "/a/b/c"
check(R, T, true)
R = "/a/b/c/+" // different number of levels
T = "/a/b/c"
check(R, T, false)
R = "+"
T = "a"
check(R, T, true)
R = "/+"
T = "a"
check(R, T, false)
R = "+/+"
T = "/a"
check(R, T, true)
R = "+/+"
T = "a"
check(R, T, false)
// ** # **
R = "#"
T = "/a/b/c"
check(R, T, true)
R = "/#"
T = "/a/b/c"
check(R, T, true)
// R = "/#/" // not valid
// T = "/a/b/c"
// check(R, T, true)
R = "/#"
T = "/a/b/c"
check(R, T, true)
R = "/a/#"
T = "/a/b/c"
check(R, T, true)
R = "/a/#"
T = "/a/b/c"
check(R, T, true)
R = "/a/b/#"
T = "/a/b/c"
check(R, T, true)
// ** unicode **
R = "☃"
T = "☃"
check(R, T, true)
R = "✈"
T = "☃"
check(R, T, false)
R = "/☃/✈"
T = "/☃/ッ"
check(R, T, false)
R = "#"
T = "/☃/ッ"
check(R, T, true)
R = "/☃/+"
T = "/☃/ッ/♫/ø/☹☹☹"
check(R, T, false)
R = "/☃/#"
T = "/☃/ッ/♫/ø/☹☹☹"
check(R, T, true)
R = "/☃/ッ/♫/ø/+"
T = "/☃/ッ/♫/ø/☹☹☹"
check(R, T, true)
R = "/☃/ッ/+/ø/☹☹☹"
T = "/☃/ッ/♫/ø/☹☹☹"
check(R, T, true)
R = "/+/a/ッ/+/ø/☹☹☹"
T = "/b/♫/ッ/♫/ø/☹☹☹"
check(R, T, false)
R = "/+/♫/ッ/+/ø/☹☹☹"
T = "/b/♫/ッ/♫/ø/☹☹☹"
check(R, T, true)
}
func Test_MatchAndDispatch(t *testing.T) {
calledback := make(chan bool)
cb := func(c *Client, m Message) {
calledback <- true
}
pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
pub.Qos = 2
pub.TopicName = "a"
pub.Payload = []byte("foo")
msgs := make(chan *packets.PublishPacket)
router, stopper := newRouter()
router.addRoute("a", cb)
router.matchAndDispatch(msgs, true, nil)
msgs <- pub
<-calledback
stopper <- true
select {
case msgs <- pub:
t.Errorf("msgs should not have a listener")
default:
}
}


@@ -0,0 +1,668 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt
import (
"bufio"
"fmt"
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
"io/ioutil"
"os"
"testing"
)
func Test_fullpath(t *testing.T) {
p := fullpath("/tmp/store", "o.44324")
e := "/tmp/store/o.44324.msg"
if p != e {
t.Fatalf("full path expected %s, got %s", e, p)
}
}
func Test_exists(t *testing.T) {
b := exists("/")
if !b {
t.Errorf("/proc/cpuinfo was not found")
}
}
func Test_exists_no(t *testing.T) {
b := exists("/this/path/is/not/real/i/hope")
if b {
t.Errorf("you have some strange files")
}
}
func isemptydir(dir string) bool {
chkcond(exists(dir))
files, err := ioutil.ReadDir(dir)
chkerr(err)
return len(files) == 0
}
func Test_mIDFromKey(t *testing.T) {
key := "i.123"
exp := uint16(123)
res := mIDFromKey(key)
if exp != res {
t.Fatalf("mIDFromKey failed")
}
}
func Test_inboundKeyFromMID(t *testing.T) {
id := uint16(9876)
exp := "i.9876"
res := inboundKeyFromMID(id)
if exp != res {
t.Fatalf("inboundKeyFromMID failed")
}
}
func Test_outboundKeyFromMID(t *testing.T) {
id := uint16(7654)
exp := "o.7654"
res := outboundKeyFromMID(id)
if exp != res {
t.Fatalf("outboundKeyFromMID failed")
}
}
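// The tests below drive persistOutbound/persistInbound against a TestStore
// mock (defined elsewhere in this package) that merely records, per
// operation, the message IDs it was handed. A minimal sketch of the shape
// these tests assume; the real mock also implements the rest of the Store
// interface:
//
//	type TestStore struct {
//		mput []uint16 // message IDs passed to Put
//		mget []uint16 // message IDs passed to Get
//		mdel []uint16 // message IDs passed to Del
//	}
//
//	func (ts *TestStore) Put(key string, m packets.ControlPacket) {
//		ts.mput = append(ts.mput, mIDFromKey(key))
//	}
//	func (ts *TestStore) Del(key string) {
//		ts.mdel = append(ts.mdel, mIDFromKey(key)) // records only; nothing is removed
//	}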
/************************
**** persistOutbound ****
************************/
func Test_persistOutbound_connect(t *testing.T) {
ts := &TestStore{}
m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
m.Qos = 0
m.Username = "user"
m.Password = []byte("pass")
m.ClientIdentifier = "cid"
//m := newConnectMsg(false, false, QOS_ZERO, false, "", nil, "cid", "user", "pass", 10)
persistOutbound(ts, m)
if len(ts.mput) != 0 {
t.Fatalf("persistOutbound put message it should not have")
}
if len(ts.mget) != 0 {
t.Fatalf("persistOutbound get message it should not have")
}
if len(ts.mdel) != 0 {
t.Fatalf("persistOutbound del message it should not have")
}
}
func Test_persistOutbound_publish_0(t *testing.T) {
ts := &TestStore{}
m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
m.Qos = 0
m.TopicName = "/popub0"
m.Payload = []byte{0xBB, 0x00}
m.MessageID = 40
persistOutbound(ts, m)
if len(ts.mput) != 0 {
t.Fatalf("persistOutbound put message it should not have")
}
if len(ts.mget) != 0 {
t.Fatalf("persistOutbound get message it should not have")
}
if len(ts.mdel) != 0 {
t.Fatalf("persistOutbound del message it should not have")
}
}
func Test_persistOutbound_publish_1(t *testing.T) {
ts := &TestStore{}
m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
m.Qos = 1
m.TopicName = "/popub1"
m.Payload = []byte{0xBB, 0x00}
m.MessageID = 41
persistOutbound(ts, m)
if len(ts.mput) != 1 || ts.mput[0] != 41 {
t.Fatalf("persistOutbound put message it should not have")
}
if len(ts.mget) != 0 {
t.Fatalf("persistOutbound get message it should not have")
}
if len(ts.mdel) != 0 {
t.Fatalf("persistOutbound del message it should not have")
}
}
func Test_persistOutbound_publish_2(t *testing.T) {
ts := &TestStore{}
m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
m.Qos = 2
m.TopicName = "/popub2"
m.Payload = []byte{0xBB, 0x00}
m.MessageID = 42
persistOutbound(ts, m)
if len(ts.mput) != 1 || ts.mput[0] != 42 {
t.Fatalf("persistOutbound put message it should not have")
}
if len(ts.mget) != 0 {
t.Fatalf("persistOutbound get message it should not have")
}
if len(ts.mdel) != 0 {
t.Fatalf("persistOutbound del message it should not have")
}
}
func Test_persistOutbound_puback(t *testing.T) {
ts := &TestStore{}
m := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
persistOutbound(ts, m)
if len(ts.mput) != 0 {
t.Fatalf("persistOutbound put message it should not have")
}
if len(ts.mget) != 0 {
t.Fatalf("persistOutbound get message it should not have")
}
if len(ts.mdel) != 1 {
t.Fatalf("persistOutbound del message it should not have")
}
}
func Test_persistOutbound_pubrec(t *testing.T) {
ts := &TestStore{}
m := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket)
persistOutbound(ts, m)
if len(ts.mput) != 0 {
t.Fatalf("persistOutbound put message it should not have")
}
if len(ts.mget) != 0 {
t.Fatalf("persistOutbound get message it should not have")
}
if len(ts.mdel) != 0 {
t.Fatalf("persistOutbound del message it should not have")
}
}
func Test_persistOutbound_pubrel(t *testing.T) {
ts := &TestStore{}
m := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket)
m.MessageID = 43
persistOutbound(ts, m)
if len(ts.mput) != 1 || ts.mput[0] != 43 {
t.Fatalf("persistOutbound put message it should not have")
}
if len(ts.mget) != 0 {
t.Fatalf("persistOutbound get message it should not have")
}
if len(ts.mdel) != 0 {
t.Fatalf("persistOutbound del message it should not have")
}
}
func Test_persistOutbound_pubcomp(t *testing.T) {
ts := &TestStore{}
m := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket)
persistOutbound(ts, m)
if len(ts.mput) != 0 {
t.Fatalf("persistOutbound put message it should not have")
}
if len(ts.mget) != 0 {
t.Fatalf("persistOutbound get message it should not have")
}
if len(ts.mdel) != 1 {
t.Fatalf("persistOutbound del message it should not have")
}
}
func Test_persistOutbound_subscribe(t *testing.T) {
ts := &TestStore{}
m := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket)
m.Topics = []string{"/posub"}
m.Qoss = []byte{1}
m.MessageID = 44
persistOutbound(ts, m)
if len(ts.mput) != 1 || ts.mput[0] != 44 {
t.Fatalf("persistOutbound put message it should not have")
}
if len(ts.mget) != 0 {
t.Fatalf("persistOutbound get message it should not have")
}
if len(ts.mdel) != 0 {
t.Fatalf("persistOutbound del message it should not have")
}
}
func Test_persistOutbound_unsubscribe(t *testing.T) {
ts := &TestStore{}
m := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket)
m.Topics = []string{"/posub"}
m.MessageID = 45
persistOutbound(ts, m)
if len(ts.mput) != 1 || ts.mput[0] != 45 {
t.Fatalf("persistOutbound put message it should not have")
}
if len(ts.mget) != 0 {
t.Fatalf("persistOutbound get message it should not have")
}
if len(ts.mdel) != 0 {
t.Fatalf("persistOutbound del message it should not have")
}
}
func Test_persistOutbound_pingreq(t *testing.T) {
ts := &TestStore{}
m := packets.NewControlPacket(packets.Pingreq)
persistOutbound(ts, m)
if len(ts.mput) != 0 {
t.Fatalf("persistOutbound put message it should not have")
}
if len(ts.mget) != 0 {
t.Fatalf("persistOutbound get message it should not have")
}
if len(ts.mdel) != 0 {
t.Fatalf("persistOutbound del message it should not have")
}
}
func Test_persistOutbound_disconnect(t *testing.T) {
ts := &TestStore{}
m := packets.NewControlPacket(packets.Disconnect)
persistOutbound(ts, m)
if len(ts.mput) != 0 {
t.Fatalf("persistOutbound put message it should not have")
}
if len(ts.mget) != 0 {
t.Fatalf("persistOutbound get message it should not have")
}
if len(ts.mdel) != 0 {
t.Fatalf("persistOutbound del message it should not have")
}
}
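// Taken together, the outbound cases above reduce to one rule: QoS 1/2
// PUBLISH, PUBREL, SUBSCRIBE and UNSUBSCRIBE packets are stored under an
// outbound key until acknowledged, an outgoing PUBACK or PUBCOMP deletes the
// inbound entry it answers, and every other packet type is ignored. A hedged
// sketch of that dispatch (hypothetical name, assuming the package's Store
// interface; the real function lives in store.go):
func persistOutboundSketch(s Store, m packets.ControlPacket) {
switch p := m.(type) {
case *packets.PublishPacket:
if p.Qos > 0 {
s.Put(outboundKeyFromMID(p.MessageID), m) // kept until the broker acks it
}
case *packets.PubrelPacket:
s.Put(outboundKeyFromMID(p.MessageID), m)
case *packets.SubscribePacket:
s.Put(outboundKeyFromMID(p.MessageID), m)
case *packets.UnsubscribePacket:
s.Put(outboundKeyFromMID(p.MessageID), m)
case *packets.PubackPacket:
s.Del(inboundKeyFromMID(p.MessageID)) // completes an inbound QoS 1 flow
case *packets.PubcompPacket:
s.Del(inboundKeyFromMID(p.MessageID)) // completes an inbound QoS 2 flow
}
}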
/************************
**** persistInbound ****
************************/
func Test_persistInbound_connack(t *testing.T) {
ts := &TestStore{}
m := packets.NewControlPacket(packets.Connack)
persistInbound(ts, m)
if len(ts.mput) != 0 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mget) != 0 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mdel) != 0 {
t.Fatalf("persistInbound in bad state")
}
}
func Test_persistInbound_publish_0(t *testing.T) {
ts := &TestStore{}
m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
m.Qos = 0
m.TopicName = "/pipub0"
m.Payload = []byte{0xCC, 0x01}
m.MessageID = 50
persistInbound(ts, m)
if len(ts.mput) != 0 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mget) != 0 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mdel) != 0 {
t.Fatalf("persistInbound in bad state")
}
}
func Test_persistInbound_publish_1(t *testing.T) {
ts := &TestStore{}
m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
m.Qos = 1
m.TopicName = "/pipub1"
m.Payload = []byte{0xCC, 0x02}
m.MessageID = 51
persistInbound(ts, m)
if len(ts.mput) != 1 || ts.mput[0] != 51 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mget) != 0 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mdel) != 0 {
t.Fatalf("persistInbound in bad state")
}
}
func Test_persistInbound_publish_2(t *testing.T) {
ts := &TestStore{}
m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
m.Qos = 2
m.TopicName = "/pipub2"
m.Payload = []byte{0xCC, 0x03}
m.MessageID = 52
persistInbound(ts, m)
if len(ts.mput) != 1 || ts.mput[0] != 52 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mget) != 0 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mdel) != 0 {
t.Fatalf("persistInbound in bad state")
}
}
func Test_persistInbound_puback(t *testing.T) {
ts := &TestStore{}
pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
pub.Qos = 1
pub.TopicName = "/pub1"
pub.Payload = []byte{0xCC, 0x04}
pub.MessageID = 53
publishKey := inboundKeyFromMID(pub.MessageID)
ts.Put(publishKey, pub)
m := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
m.MessageID = 53
persistInbound(ts, m) // "deletes" packets.Publish from store
if len(ts.mput) != 1 { // not actually deleted in TestStore
t.Fatalf("persistInbound in bad state")
}
if len(ts.mget) != 0 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mdel) != 1 || ts.mdel[0] != 53 {
t.Fatalf("persistInbound in bad state")
}
}
func Test_persistInbound_pubrec(t *testing.T) {
ts := &TestStore{}
pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
pub.Qos = 2
pub.TopicName = "/pub2"
pub.Payload = []byte{0xCC, 0x05}
pub.MessageID = 54
publishKey := inboundKeyFromMID(pub.MessageID)
ts.Put(publishKey, pub)
m := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket)
m.MessageID = 54
persistInbound(ts, m)
if len(ts.mput) != 1 || ts.mput[0] != 54 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mget) != 0 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mdel) != 0 {
t.Fatalf("persistInbound in bad state")
}
}
func Test_persistInbound_pubrel(t *testing.T) {
ts := &TestStore{}
pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
pub.Qos = 2
pub.TopicName = "/pub2"
pub.Payload = []byte{0xCC, 0x06}
pub.MessageID = 55
publishKey := inboundKeyFromMID(pub.MessageID)
ts.Put(publishKey, pub)
m := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket)
m.MessageID = 55
persistInbound(ts, m) // will overwrite publish
if len(ts.mput) != 2 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mget) != 0 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mdel) != 0 {
t.Fatalf("persistInbound in bad state")
}
}
func Test_persistInbound_pubcomp(t *testing.T) {
ts := &TestStore{}
m := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket)
m.MessageID = 56
persistInbound(ts, m)
if len(ts.mput) != 0 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mget) != 0 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mdel) != 1 || ts.mdel[0] != 56 {
t.Fatalf("persistInbound in bad state")
}
}
func Test_persistInbound_suback(t *testing.T) {
ts := &TestStore{}
m := packets.NewControlPacket(packets.Suback).(*packets.SubackPacket)
m.MessageID = 57
persistInbound(ts, m)
if len(ts.mput) != 0 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mget) != 0 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mdel) != 1 || ts.mdel[0] != 57 {
t.Fatalf("persistInbound in bad state")
}
}
func Test_persistInbound_unsuback(t *testing.T) {
ts := &TestStore{}
m := packets.NewControlPacket(packets.Unsuback).(*packets.UnsubackPacket)
m.MessageID = 58
persistInbound(ts, m)
if len(ts.mput) != 0 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mget) != 0 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mdel) != 1 || ts.mdel[0] != 58 {
t.Fatalf("persistInbound in bad state")
}
}
func Test_persistInbound_pingresp(t *testing.T) {
ts := &TestStore{}
m := packets.NewControlPacket(packets.Pingresp)
persistInbound(ts, m)
if len(ts.mput) != 0 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mget) != 0 {
t.Fatalf("persistInbound in bad state")
}
if len(ts.mdel) != 0 {
t.Fatalf("persistInbound in bad state")
}
}
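// The inbound cases mirror the outbound ones: QoS 1/2 PUBLISH packets are
// stored under an inbound key, an incoming PUBREL overwrites the stored
// PUBLISH it finalizes, acks (PUBACK, PUBCOMP, SUBACK, UNSUBACK) delete the
// outbound entry they answer, and CONNACK, PUBREC and PINGRESP touch nothing.
// A hedged sketch under the same assumptions as the outbound one:
func persistInboundSketch(s Store, m packets.ControlPacket) {
switch p := m.(type) {
case *packets.PublishPacket:
if p.Qos > 0 {
s.Put(inboundKeyFromMID(p.MessageID), m)
}
case *packets.PubrelPacket:
s.Put(inboundKeyFromMID(p.MessageID), m) // overwrites the stored PUBLISH
case *packets.PubackPacket:
s.Del(outboundKeyFromMID(p.MessageID))
case *packets.PubcompPacket:
s.Del(outboundKeyFromMID(p.MessageID))
case *packets.SubackPacket:
s.Del(outboundKeyFromMID(p.MessageID))
case *packets.UnsubackPacket:
s.Del(outboundKeyFromMID(p.MessageID))
}
}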
/***********
* restore *
***********/
func ensureRestoreDir() {
if exists("/tmp/restore") {
rerr := os.RemoveAll("/tmp/restore")
chkerr(rerr)
}
os.Mkdir("/tmp/restore", 0766)
}
func writeToRestore(fname, content string) {
f, cerr := os.Create("/tmp/restore/" + fname)
chkerr(cerr)
chkcond(f != nil)
w := bufio.NewWriter(f)
w.Write([]byte(content))
w.Flush()
f.Close()
}
func verifyFromRestore(fname, content string, t *testing.T) {
msg, oerr := os.Open("/tmp/restore/" + fname)
chkerr(oerr)
all, rerr := ioutil.ReadAll(msg)
chkerr(rerr)
msg.Close()
s := string(all)
if s != content {
t.Fatalf("verify content expected `%s` but got `%s`", content, s)
}
}
func Test_restore_1(t *testing.T) {
ensureRestoreDir()
writeToRestore("i.1.bkp", "this is critical 1")
restore("/tmp/restore")
chkcond(!exists("/tmp/restore/i.1.bkp"))
chkcond(exists("/tmp/restore/i.1.msg"))
verifyFromRestore("i.1.msg", "this is critical 1", t)
}
func Test_restore_2(t *testing.T) {
ensureRestoreDir()
writeToRestore("o.2.msg", "this is critical 2")
restore("/tmp/restore")
chkcond(!exists("/tmp/restore/o.2.bkp"))
chkcond(exists("/tmp/restore/o.2.msg"))
verifyFromRestore("o.2.msg", "this is critical 2", t)
}
func Test_restore_3(t *testing.T) {
ensureRestoreDir()
N := 20
// evens are .msg
// odds are .bkp
for i := 0; i < N; i++ {
content := fmt.Sprintf("foo %d bar", i)
if i%2 == 0 {
mname := fmt.Sprintf("i.%d.msg", i)
writeToRestore(mname, content)
} else {
mname := fmt.Sprintf("i.%d.bkp", i)
writeToRestore(mname, content)
}
}
restore("/tmp/restore")
for i := 0; i < N; i++ {
mname := fmt.Sprintf("i.%d.msg", i)
bname := fmt.Sprintf("i.%d.bkp", i)
content := fmt.Sprintf("foo %d bar", i)
chkcond(!exists("/tmp/restore/" + bname))
chkcond(exists("/tmp/restore/" + mname))
verifyFromRestore(mname, content, t)
}
}
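// The three restore tests encode the recovery contract: any leftover "*.bkp"
// file is promoted to "*.msg" with its contents intact and the backup is
// removed, while existing "*.msg" files are left untouched. A hedged sketch
// (hypothetical name, assuming "strings" is imported alongside ioutil and os;
// it ignores the case where both a .msg and its .bkp survive, which the real
// implementation has to arbitrate):
func restoreSketch(dir string) {
files, err := ioutil.ReadDir(dir)
chkerr(err)
for _, fi := range files {
name := fi.Name()
if strings.HasSuffix(name, ".bkp") {
target := strings.TrimSuffix(name, ".bkp") + ".msg"
chkerr(os.Rename(dir+"/"+name, dir+"/"+target)) // promote backup to message
}
}
}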

View File

@@ -0,0 +1,47 @@
/*
* Copyright (c) 2013 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Seth Hoenig
* Allan Stockdill-Mander
* Mike Robertson
*/
package mqtt
import (
"testing"
)
func Test_ValidateTopicAndQos_qos3(t *testing.T) {
e := validateTopicAndQos("a", 3)
if e != ErrInvalidQos {
t.Fatalf("invalid error for invalid qos")
}
}
func Test_ValidateTopicAndQos_ES(t *testing.T) {
e := validateTopicAndQos("", 0)
if e != ErrInvalidTopicEmptyString {
t.Fatalf("invalid error for empty topic name")
}
}
func Test_ValidateTopicAndQos_a_0(t *testing.T) {
e := validateTopicAndQos("a", 0)
if e != nil {
t.Fatalf("error from valid NewTopicFilter")
}
}
func Test_ValidateTopicAndQos_H(t *testing.T) {
e := validateTopicAndQos("a/#/c", 0)
if e != ErrInvalidTopicMultilevel {
t.Fatalf("invalid error for bad multilevel topic filter")
}
}
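// The four tests above fully determine the validator's behaviour: an empty
// topic, a QoS outside 0-2, and a "#" anywhere but the final level are each
// rejected with a dedicated error. A minimal sketch consistent with them
// (hypothetical name; assumes a byte QoS and that "strings" is imported):
func validateTopicAndQosSketch(topic string, qos byte) error {
if topic == "" {
return ErrInvalidTopicEmptyString
}
levels := strings.Split(topic, "/")
for i, level := range levels {
if level == "#" && i != len(levels)-1 {
return ErrInvalidTopicMultilevel // "#" must be the last level
}
}
if qos > 2 {
return ErrInvalidQos
}
return nil
}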

View File

@@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
*.test
# Folders
_obj
_test
.vagrant
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

View File

@@ -0,0 +1,41 @@
language: go
go:
- 1.3.3
- 1.4.2
env:
global:
- KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095
- TOXIPROXY_ADDR=http://localhost:8474
- KAFKA_INSTALL_ROOT=/home/travis/kafka
- KAFKA_HOSTNAME=localhost
- DEBUG=true
matrix:
- KAFKA_VERSION=0.8.1.1
- KAFKA_VERSION=0.8.2.1
before_install:
- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
- vagrant/install_cluster.sh
- vagrant/boot_cluster.sh
- vagrant/create_topics.sh
install:
- make install_dependencies
script:
- make test
- make vet
- make errcheck
- make fmt
matrix:
include:
- go: tip
env: KAFKA_VERSION=0.8.2.1
allow_failures:
- go: tip
fast_finish: true
sudo: false

View File

@@ -0,0 +1,157 @@
# Changelog
#### Version 1.5.0 (unreleased)
New Features:
- TLS-encrypted network connections are now supported. This feature is subject
to change when Kafka releases built-in TLS support, but for now this is
enough to work with TLS-terminating proxies
([#154](https://github.com/Shopify/sarama/pull/154)).
Improvements:
- The consumer will not block if a single partition is not drained by the user;
all other partitions will continue to consume normally
([#485](https://github.com/Shopify/sarama/pull/485)).
- Formatting of error strings has been much improved
([#495](https://github.com/Shopify/sarama/pull/495)).
- Internal refactoring of the producer for code cleanliness and to enable
future work ([#300](https://github.com/Shopify/sarama/pull/300)).
Bug Fixes:
- Fix a potential deadlock in the consumer on shutdown
([#475](https://github.com/Shopify/sarama/pull/475)).
#### Version 1.4.3 (2015-07-21)
Bug Fixes:
- Don't include the partitioner in the producer's "fetch partitions"
circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
- Don't retry messages until the broker is closed when abandoning a broker in
the producer ([#468](https://github.com/Shopify/sarama/pull/468)).
- Update the import path for snappy-go, it has moved again and the API has
changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).
#### Version 1.4.2 (2015-05-27)
Bug Fixes:
- Update the import path for snappy-go, it has moved from google code to github
([#456](https://github.com/Shopify/sarama/pull/456)).
#### Version 1.4.1 (2015-05-25)
Improvements:
- Optimizations when decoding snappy messages, thanks to John Potocny
([#446](https://github.com/Shopify/sarama/pull/446)).
Bug Fixes:
- Fix hypothetical race conditions on producer shutdown
([#450](https://github.com/Shopify/sarama/pull/450),
[#451](https://github.com/Shopify/sarama/pull/451)).
#### Version 1.4.0 (2015-05-01)
New Features:
- The consumer now implements `Topics()` and `Partitions()` methods to enable
users to dynamically choose what topics/partitions to consume without
instantiating a full client
([#431](https://github.com/Shopify/sarama/pull/431)).
- The partition-consumer now exposes the high water mark offset value returned
by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)).
- Added a `kafka-console-consumer` tool capable of handling multiple
partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
([#439](https://github.com/Shopify/sarama/pull/439),
[#442](https://github.com/Shopify/sarama/pull/442)).
Improvements:
- The producer's logging during retry scenarios is more consistent, more
useful, and slightly less verbose
([#429](https://github.com/Shopify/sarama/pull/429)).
- The client now shuffles its initial list of seed brokers in order to prevent
thundering herd on the first broker in the list
([#441](https://github.com/Shopify/sarama/pull/441)).
Bug Fixes:
- The producer now correctly manages its state if retries occur when it is
shutting down, fixing several instances of confusing behaviour and at least
one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
- The consumer now handles messages for different partitions asynchronously,
making it much more resilient to specific user code ordering
([#325](https://github.com/Shopify/sarama/pull/325)).
#### Version 1.3.0 (2015-04-16)
New Features:
- The client now tracks consumer group coordinators using
ConsumerMetadataRequests similar to how it tracks partition leadership using
regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
This adds two methods to the client API:
- `Coordinator(consumerGroup string) (*Broker, error)`
- `RefreshCoordinator(consumerGroup string) error`
Improvements:
- ConsumerMetadataResponses now automatically create a Broker object out of the
ID/address/port combination for the Coordinator; accessing the fields
individually has been deprecated
([#413](https://github.com/Shopify/sarama/pull/413)).
- Much improved handling of `OffsetOutOfRange` errors in the consumer.
Consumers will fail to start if the provided offset is out of range
([#418](https://github.com/Shopify/sarama/pull/418))
and they will automatically shut down if the offset falls out of range
([#424](https://github.com/Shopify/sarama/pull/424)).
- Small performance improvement in encoding and decoding protocol messages
([#427](https://github.com/Shopify/sarama/pull/427)).
Bug Fixes:
- Fix a rare race condition in the client's background metadata refresher if
it happens to be activated while the client is being closed
([#422](https://github.com/Shopify/sarama/pull/422)).
#### Version 1.2.0 (2015-04-07)
Improvements:
- The producer's behaviour when `Flush.Frequency` is set is now more intuitive
([#389](https://github.com/Shopify/sarama/pull/389)).
- The producer is now somewhat more memory-efficient during and after retrying
messages due to an improved queue implementation
([#396](https://github.com/Shopify/sarama/pull/396)).
- The consumer produces much more useful logging output when leadership
changes ([#385](https://github.com/Shopify/sarama/pull/385)).
- The client's `GetOffset` method will now automatically refresh metadata and
retry once in the event of stale information or similar
([#394](https://github.com/Shopify/sarama/pull/394)).
- Broker connections now have support for using TCP keepalives
([#407](https://github.com/Shopify/sarama/issues/407)).
Bug Fixes:
- The OffsetCommitRequest message now correctly implements all three possible
API versions ([#390](https://github.com/Shopify/sarama/pull/390),
[#400](https://github.com/Shopify/sarama/pull/400)).
#### Version 1.1.0 (2015-03-20)
Improvements:
- Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
broken topics don't choke throughput
([#373](https://github.com/Shopify/sarama/pull/373)).
Bug Fixes:
- Fix the producer's internal reference counting in certain unusual scenarios
([#367](https://github.com/Shopify/sarama/pull/367)).
- Fix the consumer's internal reference counting in certain unusual scenarios
([#369](https://github.com/Shopify/sarama/pull/369)).
- Fix a condition where the producer's internal control messages could have
gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
- Fix an issue where invalid partition lists would be cached when asking for
metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).
#### Version 1.0.0 (2015-03-17)
Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
- All the configuration values have been unified in the `Config` struct.
- Much improved test suite.

View File

@@ -0,0 +1,31 @@
# Contributing
Contributions are always welcome, both reporting issues and submitting pull requests!
### Reporting issues
Please make sure to include any potentially useful information in the issue, so we can pinpoint the issue faster without going back and forth.
- What SHA of Sarama are you running? If this is not the latest SHA on the master branch, please check whether the problem persists with the latest version.
- You can set `sarama.Logger` to a [log.Logger](http://golang.org/pkg/log/#Logger) instance to capture debug output. Please include it in your issue description.
- Also look at the logs of the Kafka broker you are connected to. If you see anything out of the ordinary, please include it.
Also, please include the following information about your environment, so we can help you faster:
- What version of Kafka are you using?
- What version of Go are you using?
- What are the values of your Producer/Consumer/Client configuration?
### Submitting pull requests
We will gladly accept bug fixes, or additions to this library. Please fork this library, commit & push your changes, and open a pull request. Because this library is in production use by many people and applications, we code review all additions. To make the review process go as smoothly as possible, please consider the following.
- If you plan to work on something major, please open an issue to discuss the design first.
- Don't break backwards compatibility. If you really have to, open an issue to discuss this first.
- Make sure to use the `go fmt` command to format your code according to the standards. Even better, set up your editor to do this for you when saving.
- Run [go vet](https://godoc.org/golang.org/x/tools/cmd/vet) to detect any suspicious constructs in your code that could be bugs.
- Explicitly handle all error return values. If you really want to ignore an error value, you can assign it to `_`. You can use [errcheck](https://github.com/kisielk/errcheck) to verify whether you have handled all errors (see the short example below).
- You may also want to run [golint](https://github.com/golang/lint) as well to detect style problems.
- Add tests that cover the changes you made. Make sure to run `go test` with the `-race` argument to test for race conditions.
- Make sure your code is supported by all the Go versions we support. You can rely on [Travis CI](https://travis-ci.org/Shopify/sarama) for testing older Go versions.
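As a quick, hedged illustration of the error-handling point (the file name is a placeholder):

    f, err := os.Open("data.txt")
    if err != nil {
        return err // handled explicitly
    }
    defer func() { _ = f.Close() }() // deliberately discarded with `_`; errcheck is satisfied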

View File

@@ -0,0 +1,20 @@
Copyright (c) 2013 Evan Huus
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -0,0 +1,24 @@
default: fmt vet errcheck test
test:
go test -v -timeout 60s -race ./...
vet:
go vet ./...
errcheck:
errcheck github.com/Shopify/sarama/...
fmt:
@if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi
install_dependencies: install_errcheck install_go_vet get
install_errcheck:
go get github.com/kisielk/errcheck
install_go_vet:
go get golang.org/x/tools/cmd/vet
get:
go get -t

View File

@@ -0,0 +1,31 @@
sarama
======
[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama)
[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
Sarama is an MIT-licensed Go client library for Apache Kafka 0.8 (and later).
### Getting started
- API documentation and example are available via godoc at https://godoc.org/github.com/Shopify/sarama.
- Mocks for testing are available in the [mocks](./mocks) subpackage.
- The [examples](./examples) directory contains more elaborate example applications.
- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
- There is a Google Group for Kafka client users and authors at https://groups.google.com/forum/#!forum/kafka-clients
### Compatibility and API stability
Sarama provides a "2 releases + 2 months" compatibility guarantee: we support the two latest releases of Kafka
and Go, and we provide a two month grace period for older releases. This means we currently officially
support Go 1.3 and 1.4, and Kafka 0.8.1 and 0.8.2.
Sarama follows semantic versioning and provides API stability via the gopkg.in service.
You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
A changelog is available [here](CHANGELOG.md).
### Other
* [Sarama wiki](https://github.com/Shopify/sarama/wiki) to get started hacking on sarama itself.
* [Kafka Project Home](https://kafka.apache.org/)
* [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
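A minimal, hypothetical example of the synchronous producer (the broker address, topic, and payload are placeholders, and enabling `Return.Successes` is assumed so the producer can report the offset back):

    package main

    import (
        "log"

        "github.com/Shopify/sarama"
    )

    func main() {
        config := sarama.NewConfig()
        config.Producer.Return.Successes = true // acks are reported back for SendMessage

        producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
        if err != nil {
            log.Fatalln(err)
        }
        defer producer.Close()

        partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
            Topic: "my_topic",
            Value: sarama.StringEncoder("hello, kafka"),
        })
        if err != nil {
            log.Fatalln(err)
        }
        log.Printf("message stored at partition %d, offset %d", partition, offset)
    }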

View File

@@ -0,0 +1,22 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
MEMORY = 3072
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.box = "hashicorp/precise64"
config.vm.provision :shell, path: "vagrant/provision.sh"
config.vm.network "private_network", ip: "192.168.100.67"
config.vm.provider "vmware_fusion" do |v|
v.vmx["memsize"] = MEMORY.to_s
end
config.vm.provider "virtualbox" do |v|
v.memory = MEMORY
end
end

View File

@@ -0,0 +1,924 @@
package sarama
import (
"fmt"
"sync"
"time"
"github.com/eapache/go-resiliency/breaker"
"github.com/eapache/queue"
)
// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
// and parses responses for errors. You must read from the Errors() channel or the
// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
// leaks: it will not be garbage-collected automatically when it passes out of
// scope.
type AsyncProducer interface {
// AsyncClose triggers a shutdown of the producer, flushing any messages it may have
// buffered. The shutdown has completed when both the Errors and Successes channels
// have been closed. When calling AsyncClose, you *must* continue to read from those
// channels in order to drain the results of any messages in flight.
AsyncClose()
// Close shuts down the producer and flushes any messages it may have buffered.
// You must call this function before a producer object passes out of scope, as
// it may otherwise leak memory. You must call this before calling Close on the
// underlying client.
Close() error
// Input is the input channel for the user to write messages to that they wish to send.
Input() chan<- *ProducerMessage
// Successes is the success output channel back to the user when AckSuccesses is enabled.
// If Return.Successes is true, you MUST read from this channel or the Producer will deadlock.
// It is suggested that you send and read messages together in a single select statement.
Successes() <-chan *ProducerMessage
// Errors is the error output channel back to the user. You MUST read from this channel
// or the Producer will deadlock when the channel is full. Alternatively, you can set
// Producer.Return.Errors in your config to false, which prevents errors from being returned.
Errors() <-chan *ProducerError
}
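// A hedged sketch of the send-and-read pattern described above, assuming the
// default config (Return.Errors true, Return.Successes false) and a
// placeholder broker address; shutdown via AsyncClose plus draining both
// channels is omitted for brevity:
//
//	producer, err := NewAsyncProducer([]string{"localhost:9092"}, nil)
//	if err != nil {
//		panic(err)
//	}
//	for _, word := range []string{"alpha", "beta", "gamma"} {
//		select {
//		case producer.Input() <- &ProducerMessage{Topic: "t", Value: StringEncoder(word)}:
//		case err := <-producer.Errors():
//			Logger.Println("failed to produce:", err)
//		}
//	}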
type asyncProducer struct {
client Client
conf *Config
ownClient bool
errors chan *ProducerError
input, successes, retries chan *ProducerMessage
inFlight sync.WaitGroup
brokers map[*Broker]chan<- *ProducerMessage
brokerRefs map[chan<- *ProducerMessage]int
brokerLock sync.Mutex
}
// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
client, err := NewClient(addrs, conf)
if err != nil {
return nil, err
}
p, err := NewAsyncProducerFromClient(client)
if err != nil {
return nil, err
}
p.(*asyncProducer).ownClient = true
return p, nil
}
// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this producer.
func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
// Check that we are not dealing with a closed Client before processing any other arguments
if client.Closed() {
return nil, ErrClosedClient
}
p := &asyncProducer{
client: client,
conf: client.Config(),
errors: make(chan *ProducerError),
input: make(chan *ProducerMessage),
successes: make(chan *ProducerMessage),
retries: make(chan *ProducerMessage),
brokers: make(map[*Broker]chan<- *ProducerMessage),
brokerRefs: make(map[chan<- *ProducerMessage]int),
}
// launch our singleton dispatchers
go withRecover(p.dispatcher)
go withRecover(p.retryHandler)
return p, nil
}
type flagSet int8
const (
chaser flagSet = 1 << iota // message is last in a group that failed
shutdown // start the shutdown process
)
// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
type ProducerMessage struct {
Topic string // The Kafka topic for this message.
Key Encoder // The partitioning key for this message. It must implement the Encoder interface. Pre-existing Encoders include StringEncoder and ByteEncoder.
Value Encoder // The actual message to store in Kafka. It must implement the Encoder interface. Pre-existing Encoders include StringEncoder and ByteEncoder.
// These are filled in by the producer as the message is processed
Offset int64 // Offset is the offset of the message stored on the broker. This is only guaranteed to be defined if the message was successfully delivered and RequiredAcks is not NoResponse.
Partition int32 // Partition is the partition that the message was sent to. This is only guaranteed to be defined if the message was successfully delivered.
Metadata interface{} // This field is used to hold arbitrary data you wish to include so it will be available when receiving on the Successes and Errors channels. Sarama completely ignores this field; it is only there for pass-through data.
retries int
flags flagSet
}
func (m *ProducerMessage) byteSize() int {
size := 26 // the metadata overhead of CRC, flags, etc.
if m.Key != nil {
size += m.Key.Length()
}
if m.Value != nil {
size += m.Value.Length()
}
return size
}
func (m *ProducerMessage) clear() {
m.flags = 0
m.retries = 0
}
// ProducerError is the type of error generated when the producer fails to deliver a message.
// It contains the original ProducerMessage as well as the actual error value.
type ProducerError struct {
Msg *ProducerMessage
Err error
}
func (pe ProducerError) Error() string {
return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
}
// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the error interface.
// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
// when closing a producer.
type ProducerErrors []*ProducerError
func (pe ProducerErrors) Error() string {
return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
}
func (p *asyncProducer) Errors() <-chan *ProducerError {
return p.errors
}
func (p *asyncProducer) Successes() <-chan *ProducerMessage {
return p.successes
}
func (p *asyncProducer) Input() chan<- *ProducerMessage {
return p.input
}
func (p *asyncProducer) Close() error {
p.AsyncClose()
if p.conf.Producer.Return.Successes {
go withRecover(func() {
for _ = range p.successes {
}
})
}
var errors ProducerErrors
if p.conf.Producer.Return.Errors {
for event := range p.errors {
errors = append(errors, event)
}
}
if len(errors) > 0 {
return errors
}
return nil
}
func (p *asyncProducer) AsyncClose() {
go withRecover(p.shutdown)
}
// singleton
// dispatches messages by topic
func (p *asyncProducer) dispatcher() {
handlers := make(map[string]chan<- *ProducerMessage)
shuttingDown := false
for msg := range p.input {
if msg == nil {
Logger.Println("Something tried to send a nil message, it was ignored.")
continue
}
if msg.flags&shutdown != 0 {
shuttingDown = true
p.inFlight.Done()
continue
} else if msg.retries == 0 {
if shuttingDown {
// we can't just call returnError here because that decrements the wait group,
// which hasn't been incremented yet for this message, and shouldn't be
pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
if p.conf.Producer.Return.Errors {
p.errors <- pErr
} else {
Logger.Println(pErr)
}
continue
}
p.inFlight.Add(1)
}
if (p.conf.Producer.Compression == CompressionNone && msg.Value != nil && msg.Value.Length() > p.conf.Producer.MaxMessageBytes) ||
(msg.byteSize() > p.conf.Producer.MaxMessageBytes) {
p.returnError(msg, ErrMessageSizeTooLarge)
continue
}
handler := handlers[msg.Topic]
if handler == nil {
handler = p.newTopicProducer(msg.Topic)
handlers[msg.Topic] = handler
}
handler <- msg
}
for _, handler := range handlers {
close(handler)
}
}
// one per topic
// partitions messages, then dispatches them by partition
type topicProducer struct {
parent *asyncProducer
topic string
input <-chan *ProducerMessage
breaker *breaker.Breaker
handlers map[int32]chan<- *ProducerMessage
partitioner Partitioner
}
func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
tp := &topicProducer{
parent: p,
topic: topic,
input: input,
breaker: breaker.New(3, 1, 10*time.Second),
handlers: make(map[int32]chan<- *ProducerMessage),
partitioner: p.conf.Producer.Partitioner(topic),
}
go withRecover(tp.dispatch)
return input
}
func (tp *topicProducer) dispatch() {
for msg := range tp.input {
if msg.retries == 0 {
if err := tp.partitionMessage(msg); err != nil {
tp.parent.returnError(msg, err)
continue
}
}
handler := tp.handlers[msg.Partition]
if handler == nil {
handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
tp.handlers[msg.Partition] = handler
}
handler <- msg
}
for _, handler := range tp.handlers {
close(handler)
}
}
func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
var partitions []int32
err := tp.breaker.Run(func() (err error) {
if tp.partitioner.RequiresConsistency() {
partitions, err = tp.parent.client.Partitions(msg.Topic)
} else {
partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
}
return
})
if err != nil {
return err
}
numPartitions := int32(len(partitions))
if numPartitions == 0 {
return ErrLeaderNotAvailable
}
choice, err := tp.partitioner.Partition(msg, numPartitions)
if err != nil {
return err
} else if choice < 0 || choice >= numPartitions {
return ErrInvalidPartition
}
msg.Partition = partitions[choice]
return nil
}
// one per partition per topic
// dispatches messages to the appropriate broker
// also responsible for maintaining message order during retries
type partitionProducer struct {
parent *asyncProducer
topic string
partition int32
input <-chan *ProducerMessage
leader *Broker
breaker *breaker.Breaker
output chan<- *ProducerMessage
// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
// all other messages get buffered in retryState[msg.retries].buf to preserve ordering
// retryState[msg.retries].expectChaser simply tracks whether we've seen a chaser message for a given level (and
// therefore whether our buffer is complete and safe to flush)
highWatermark int
retryState []partitionRetryState
}
type partitionRetryState struct {
buf []*ProducerMessage
expectChaser bool
}
func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
pp := &partitionProducer{
parent: p,
topic: topic,
partition: partition,
input: input,
breaker: breaker.New(3, 1, 10*time.Second),
retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
}
go withRecover(pp.dispatch)
return input
}
func (pp *partitionProducer) dispatch() {
// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
// on the first message
pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
if pp.leader != nil {
pp.output = pp.parent.getBrokerProducer(pp.leader)
}
for msg := range pp.input {
if msg.retries > pp.highWatermark {
// a new, higher, retry level; handle it and then back off
pp.newHighWatermark(msg.retries)
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
} else if pp.highWatermark > 0 {
// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
if msg.retries < pp.highWatermark {
// in fact this message is not even the current retry level, so buffer it for now (unless it's just a chaser)
if msg.flags&chaser == chaser {
pp.retryState[msg.retries].expectChaser = false
pp.parent.inFlight.Done() // this chaser is now handled and will be garbage collected
} else {
pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
}
continue
} else if msg.flags&chaser == chaser {
// this message is of the current retry level (msg.retries == highWatermark) and the chaser flag is set,
// meaning this retry level is done and we can go down (at least) one level and flush that
pp.retryState[pp.highWatermark].expectChaser = false
pp.flushRetryBuffers()
pp.parent.inFlight.Done() // this chaser is now handled and will be garbage collected
continue
}
}
// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
// without breaking any of our ordering guarantees
if pp.output == nil {
if err := pp.updateLeader(); err != nil {
pp.parent.returnError(msg, err)
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
continue
}
Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
}
pp.output <- msg
}
if pp.output != nil {
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
}
}
func (pp *partitionProducer) newHighWatermark(hwm int) {
Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
pp.highWatermark = hwm
// send off a chaser so that we know when everything "in between" has made it
// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
pp.retryState[pp.highWatermark].expectChaser = true
pp.parent.inFlight.Add(1) // we're generating a chaser message; track it so we don't shut down while it's still inflight
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: chaser, retries: pp.highWatermark - 1}
// a new HWM means that our current broker selection is out of date
Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
pp.output = nil
}
func (pp *partitionProducer) flushRetryBuffers() {
Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
for {
pp.highWatermark--
if pp.output == nil {
if err := pp.updateLeader(); err != nil {
pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
goto flushDone
}
Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
}
for _, msg := range pp.retryState[pp.highWatermark].buf {
pp.output <- msg
}
flushDone:
pp.retryState[pp.highWatermark].buf = nil
if pp.retryState[pp.highWatermark].expectChaser {
Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
break
} else if pp.highWatermark == 0 {
Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
break
}
}
}
func (pp *partitionProducer) updateLeader() error {
return pp.breaker.Run(func() (err error) {
if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
return err
}
if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
return err
}
pp.output = pp.parent.getBrokerProducer(pp.leader)
return nil
})
}
// one per broker, constructs both an aggregator and a flusher
func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage {
input := make(chan *ProducerMessage)
bridge := make(chan []*ProducerMessage)
a := &aggregator{
parent: p,
broker: broker,
input: input,
output: bridge,
}
go withRecover(a.run)
f := &flusher{
parent: p,
broker: broker,
input: bridge,
currentRetries: make(map[string]map[int32]error),
}
go withRecover(f.run)
return input
}
// groups messages together into appropriately-sized batches for sending to the broker
// based on https://godoc.org/github.com/eapache/channels#BatchingChannel
type aggregator struct {
parent *asyncProducer
broker *Broker
input <-chan *ProducerMessage
output chan<- []*ProducerMessage
buffer []*ProducerMessage
bufferBytes int
timer <-chan time.Time
}
func (a *aggregator) run() {
var output chan<- []*ProducerMessage
for {
select {
case msg := <-a.input:
if msg == nil {
goto shutdown
}
if a.wouldOverflow(msg) {
Logger.Printf("producer/aggregator/%d maximum request accumulated, forcing blocking flush\n", a.broker.ID())
a.output <- a.buffer
a.reset()
output = nil
}
a.buffer = append(a.buffer, msg)
a.bufferBytes += msg.byteSize()
if a.readyToFlush(msg) {
output = a.output
} else if a.parent.conf.Producer.Flush.Frequency > 0 && a.timer == nil {
a.timer = time.After(a.parent.conf.Producer.Flush.Frequency)
}
case <-a.timer:
output = a.output
case output <- a.buffer:
a.reset()
output = nil
}
}
shutdown:
if len(a.buffer) > 0 {
a.output <- a.buffer
}
close(a.output)
}
func (a *aggregator) wouldOverflow(msg *ProducerMessage) bool {
switch {
// Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
case a.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)):
return true
// Would we overflow the size-limit of a compressed message-batch?
case a.parent.conf.Producer.Compression != CompressionNone && a.bufferBytes+msg.byteSize() >= a.parent.conf.Producer.MaxMessageBytes:
return true
// Would we overflow simply in number of messages?
case a.parent.conf.Producer.Flush.MaxMessages > 0 && len(a.buffer) >= a.parent.conf.Producer.Flush.MaxMessages:
return true
default:
return false
}
}
func (a *aggregator) readyToFlush(msg *ProducerMessage) bool {
switch {
// If all three config values are 0, we always flush as-fast-as-possible
case a.parent.conf.Producer.Flush.Frequency == 0 && a.parent.conf.Producer.Flush.Bytes == 0 && a.parent.conf.Producer.Flush.Messages == 0:
return true
// If the message is a chaser we must flush to maintain the state-machine
case msg.flags&chaser == chaser:
return true
// If we've passed the message trigger-point
case a.parent.conf.Producer.Flush.Messages > 0 && len(a.buffer) >= a.parent.conf.Producer.Flush.Messages:
return true
// If we've passed the byte trigger-point
case a.parent.conf.Producer.Flush.Bytes > 0 && a.bufferBytes >= a.parent.conf.Producer.Flush.Bytes:
return true
default:
return false
}
}
func (a *aggregator) reset() {
a.timer = nil
a.buffer = nil
a.bufferBytes = 0
}
// takes a batch at a time from the aggregator and sends to the broker
type flusher struct {
parent *asyncProducer
broker *Broker
input <-chan []*ProducerMessage
currentRetries map[string]map[int32]error
}
func (f *flusher) run() {
var closing error
Logger.Printf("producer/flusher/%d starting up\n", f.broker.ID())
for batch := range f.input {
if closing != nil {
f.parent.retryMessages(batch, closing)
continue
}
msgSets := f.groupAndFilter(batch)
request := f.parent.buildRequest(msgSets)
if request == nil {
continue
}
response, err := f.broker.Produce(request)
switch err.(type) {
case nil:
break
case PacketEncodingError:
f.parent.returnErrors(batch, err)
continue
default:
Logger.Printf("producer/flusher/%d state change to [closing] because %s\n", f.broker.ID(), err)
f.parent.abandonBrokerConnection(f.broker)
_ = f.broker.Close()
closing = err
f.parent.retryMessages(batch, err)
continue
}
if response == nil {
// this only happens when RequiredAcks is NoResponse, so we have to assume success
f.parent.returnSuccesses(batch)
continue
}
f.parseResponse(msgSets, response)
}
Logger.Printf("producer/flusher/%d shut down\n", f.broker.ID())
}
func (f *flusher) groupAndFilter(batch []*ProducerMessage) map[string]map[int32][]*ProducerMessage {
msgSets := make(map[string]map[int32][]*ProducerMessage)
for i, msg := range batch {
if f.currentRetries[msg.Topic] != nil && f.currentRetries[msg.Topic][msg.Partition] != nil {
// we're currently retrying this partition so we need to filter out this message
f.parent.retryMessages([]*ProducerMessage{msg}, f.currentRetries[msg.Topic][msg.Partition])
batch[i] = nil
if msg.flags&chaser == chaser {
// ...but now we can start processing future messages again
Logger.Printf("producer/flusher/%d state change to [normal] on %s/%d\n",
f.broker.ID(), msg.Topic, msg.Partition)
delete(f.currentRetries[msg.Topic], msg.Partition)
}
continue
}
partitionSet := msgSets[msg.Topic]
if partitionSet == nil {
partitionSet = make(map[int32][]*ProducerMessage)
msgSets[msg.Topic] = partitionSet
}
partitionSet[msg.Partition] = append(partitionSet[msg.Partition], msg)
}
return msgSets
}
func (f *flusher) parseResponse(msgSets map[string]map[int32][]*ProducerMessage, response *ProduceResponse) {
// we iterate through the blocks in the request set, not the response, so that we notice
// if the response is missing a block completely
for topic, partitionSet := range msgSets {
for partition, msgs := range partitionSet {
block := response.GetBlock(topic, partition)
if block == nil {
f.parent.returnErrors(msgs, ErrIncompleteResponse)
continue
}
switch block.Err {
// Success
case ErrNoError:
for i := range msgs {
msgs[i].Offset = block.Offset + int64(i)
}
f.parent.returnSuccesses(msgs)
// Retriable errors
case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable,
ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
Logger.Printf("producer/flusher/%d state change to [retrying] on %s/%d because %v\n",
f.broker.ID(), topic, partition, block.Err)
if f.currentRetries[topic] == nil {
f.currentRetries[topic] = make(map[int32]error)
}
f.currentRetries[topic][partition] = block.Err
f.parent.retryMessages(msgs, block.Err)
// Other non-retriable errors
default:
f.parent.returnErrors(msgs, block.Err)
}
}
}
}
// singleton
// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
func (p *asyncProducer) retryHandler() {
var msg *ProducerMessage
buf := queue.New()
for {
if buf.Length() == 0 {
msg = <-p.retries
} else {
select {
case msg = <-p.retries:
case p.input <- buf.Peek().(*ProducerMessage):
buf.Remove()
continue
}
}
if msg == nil {
return
}
buf.Add(msg)
}
}
// utility functions
func (p *asyncProducer) shutdown() {
Logger.Println("Producer shutting down.")
p.inFlight.Add(1)
p.input <- &ProducerMessage{flags: shutdown}
p.inFlight.Wait()
if p.ownClient {
err := p.client.Close()
if err != nil {
Logger.Println("producer/shutdown failed to close the embedded client:", err)
}
}
close(p.input)
close(p.retries)
close(p.errors)
close(p.successes)
}
func (p *asyncProducer) buildRequest(batch map[string]map[int32][]*ProducerMessage) *ProduceRequest {
req := &ProduceRequest{RequiredAcks: p.conf.Producer.RequiredAcks, Timeout: int32(p.conf.Producer.Timeout / time.Millisecond)}
empty := true
for topic, partitionSet := range batch {
for partition, msgSet := range partitionSet {
setToSend := new(MessageSet)
setSize := 0
for _, msg := range msgSet {
var keyBytes, valBytes []byte
var err error
if msg.Key != nil {
if keyBytes, err = msg.Key.Encode(); err != nil {
p.returnError(msg, err)
continue
}
}
if msg.Value != nil {
if valBytes, err = msg.Value.Encode(); err != nil {
p.returnError(msg, err)
continue
}
}
if p.conf.Producer.Compression != CompressionNone && setSize+msg.byteSize() > p.conf.Producer.MaxMessageBytes {
// compression causes message-sets to be wrapped as single messages, which have tighter
// size requirements, so we have to respect those limits
valBytes, err := encode(setToSend)
if err != nil {
Logger.Println(err) // if this happens, it's basically our fault.
panic(err)
}
req.AddMessage(topic, partition, &Message{Codec: p.conf.Producer.Compression, Key: nil, Value: valBytes})
setToSend = new(MessageSet)
setSize = 0
}
setSize += msg.byteSize()
setToSend.addMessage(&Message{Codec: CompressionNone, Key: keyBytes, Value: valBytes})
empty = false
}
if p.conf.Producer.Compression == CompressionNone {
req.AddSet(topic, partition, setToSend)
} else {
valBytes, err := encode(setToSend)
if err != nil {
Logger.Println(err) // if this happens, it's basically our fault.
panic(err)
}
req.AddMessage(topic, partition, &Message{Codec: p.conf.Producer.Compression, Key: nil, Value: valBytes})
}
}
}
if empty {
return nil
}
return req
}
func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
msg.clear()
pErr := &ProducerError{Msg: msg, Err: err}
if p.conf.Producer.Return.Errors {
p.errors <- pErr
} else {
Logger.Println(pErr)
}
p.inFlight.Done()
}
func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
for _, msg := range batch {
if msg != nil {
p.returnError(msg, err)
}
}
}
func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
for _, msg := range batch {
if msg == nil {
continue
}
if p.conf.Producer.Return.Successes {
msg.clear()
p.successes <- msg
}
p.inFlight.Done()
}
}
func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
for _, msg := range batch {
if msg == nil {
continue
}
if msg.retries >= p.conf.Producer.Retry.Max {
p.returnError(msg, err)
} else {
msg.retries++
p.retries <- msg
}
}
}
func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
bp := p.brokers[broker]
if bp == nil {
bp = p.newBrokerProducer(broker)
p.brokers[broker] = bp
p.brokerRefs[bp] = 0
}
p.brokerRefs[bp]++
return bp
}
func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
p.brokerRefs[bp]--
if p.brokerRefs[bp] == 0 {
close(bp)
delete(p.brokerRefs, bp)
if p.brokers[broker] == bp {
delete(p.brokers, broker)
}
}
}
func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
delete(p.brokers, broker)
}

Some files were not shown because too many files have changed in this diff.