Compare commits

...

948 Commits

Author SHA1 Message Date
Cameron Sparr
69ad61bb30 processes: add 'unknown' procs (?) 2016-05-10 20:56:36 -07:00
Cameron Sparr
bb254fc2b9 Default docker timeout in case one is not defined in config 2016-05-10 14:18:55 -07:00
Cameron Sparr
aeadc2c43a Update etc/telegraf.conf, mqtt_cons readme 2016-05-10 14:18:55 -07:00
Cameron Sparr
ed492fe950 update influxdb & gopsutil deps 2016-05-10 14:18:55 -07:00
Cameron Sparr
775daba8f5 Change Version->version for consistency w/ influxdb 2016-05-10 14:18:55 -07:00
Cameron Sparr
677dd7ad53 Release 0.13 2016-05-10 14:18:55 -07:00
Cameron Sparr
85dee02a3b snmp plugin: change host -> snmp_host
closes #1156
2016-05-10 14:18:00 -07:00
Cameron Sparr
afdebbc3a2 Make OidInstanceMapping a field of the snmp host
fixes #1171
2016-05-10 10:15:01 -07:00
Jörg Thalheim
5deb22a539 docker: add container_id also to per cpu stats
currently this field exists only for total cpu usage

closes #1168
2016-05-09 16:43:27 -07:00
Ross McDonald
36b9e2e077 Merge pull request #1157 from influxdata/ross-build-updates
Minor fixes to build script
2016-05-06 11:28:48 -05:00
Ross McDonald
5348937c3d Choose correct configuration when building for windows. 2016-05-06 10:46:29 -05:00
Ross McDonald
72fcacbbc7 Minor fixes to build script:
- Fix for --name build parameter
- Remove rc parameter from build script
- Fix regression on first-level tarball directory structure
- Convert any dashes/underscores in version tag to tilde
2016-05-05 14:02:34 -05:00
Lukasz Jagiello
4c28f15b35 Fix #1148 - chatty MySQL
Two additional config options to reduce MySQL metrics

With:
 - gather_table_lock_waits = false
 - gather_event_waits = false

```
| wc -l
34
```

With:
 - gather_table_lock_waits = true
 - gather_event_waits = true

```
| wc -l
50040
```

closes #1148
closes #1149
2016-05-04 10:23:54 -06:00
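As a side note, a minimal sketch of how gather switches like these are typically declared as boolean plugin config fields with TOML tags; the struct and field names below are assumptions for illustration, not the mysql plugin's actual code:

```go
package main

import "fmt"

// mysqlConfig is a hypothetical stand-in for the mysql plugin's config struct.
type mysqlConfig struct {
	GatherTableLockWaits bool `toml:"gather_table_lock_waits"`
	GatherEventWaits     bool `toml:"gather_event_waits"`
}

func main() {
	// Zero values leave both gathers off, which keeps the metric count small
	// (34 lines vs. 50040 in the comparison above).
	cfg := mysqlConfig{}
	fmt.Printf("%+v\n", cfg)
}
```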
Marko Crnic
095ef04c04 Fix formatting for haproxy tests
closes #1146
2016-05-04 09:47:39 -06:00
Marko Crnic
7d49979658 Update haproxy input plugin sample configuration 2016-05-04 09:46:44 -06:00
Marko Crnic
7a36695a21 Add tests for haproxy stats socket 2016-05-04 09:46:44 -06:00
Marko Crnic
5865587bd0 Add stats socket support to haproxy plugin 2016-05-04 09:46:44 -06:00
Jörg Thalheim
219bf93566 docker: add container_id to blkio metrics
all container metrics except blkio have this field

closes #1150
2016-05-04 09:28:13 -06:00
Ross McDonald
8371546a66 Disable circle uploads to S3 until more testing can be done for external PR's. 2016-05-03 11:26:52 -05:00
John Engelman
0b9b7bddd7 Specify host mount prefix with envvar.
closes #1120
2016-05-02 15:53:30 -06:00
Rene Zbinden
4c8449f4bc fix sysstat timeout error
closes #1134
2016-05-02 15:17:11 -06:00
Ross McDonald
36d7b5c9ab Improvements to build.py:
- Now uses Python argparse for cleaner handling of arguments
- Added function documentation
- Removed a few unneeded functions
- Updated nightly logic to increment the minor version
- Added support for building from specific branch or commit
- Changed --no-stash option to --no-uncommitted for clarity
- Added a --release flag, default package output will now contain the branch and commit hash in the version number
- Static builds are now listed as an architecture
- Changed default upload bucket to dl.influxdata.com/telegraf
- Don't include iteration in package name

closes #1040
2016-05-02 14:37:29 -06:00
Cameron Sparr
f2b0ea6722 value parser: doc & string handling 2016-05-02 12:17:20 -06:00
Cameron Sparr
46f4be88a6 Revert "exec plugin: allow using glob pattern in command list"
This reverts commit 6381efa7ce.
2016-05-02 12:07:17 -06:00
Jari Sukanen
6381efa7ce exec plugin: allow using glob pattern in command list
Allow using glob pattern in the command list in configuration. This enables for
example placing all commands in a single directory and using /path/to/dir/*.sh
as one of the commands to run all shell scripts in that directory.

Glob patterns are applied on every run of the commands, so matching commands can
be added without restarting telegraf.

closes #1127
2016-05-02 11:36:15 -06:00
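A minimal sketch of the behavior described above, assuming each configured command is expanded with filepath.Glob on every gather; the helper name is an assumption, not the exec plugin's actual code:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// expandCommands expands glob patterns in the configured command list so that,
// e.g., "/path/to/dir/*.sh" picks up newly added scripts without a restart.
func expandCommands(commands []string) []string {
	var expanded []string
	for _, cmd := range commands {
		matches, err := filepath.Glob(cmd)
		if err != nil || len(matches) == 0 {
			// Not a glob (or nothing matched): keep the entry as-is.
			expanded = append(expanded, cmd)
			continue
		}
		expanded = append(expanded, matches...)
	}
	return expanded
}

func main() {
	fmt.Println(expandCommands([]string{"/usr/bin/uptime", "/path/to/dir/*.sh"}))
}
```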
Pierre Fersing
85ee66efb9 "DELAYED" Inserts were deprecated in MySQL 5.6.6
closes #1136
2016-05-02 11:23:28 -06:00
Victor Garcia
40dccf5b29 Metric for MongoDB jumbo chunks
closes #1128
2016-05-01 14:27:27 -06:00
Cameron Sparr
c114849a31 Use a timeout for docker list & stat cmds
closes #1133
2016-05-01 10:26:46 -06:00
Cameron Sparr
4e9798d0e6 agent and tags configs sometimes not applied
closes #1090
2016-04-29 19:44:01 -06:00
Cameron Sparr
a30b1a394f Kafka output: set max_retry=3 & required_acks=-1 as defaults
closes #1113
2016-04-29 18:51:45 -06:00
Cameron Sparr
91460436cf Changelog update 2016-04-29 12:32:04 -06:00
Cameron Sparr
3f807a9432 Implement timeouts for all exec command runners
First is to write an internal CombinedOutput and Run function with a
timeout.

Second, the following instances of command runners need to have timeouts:

    plugins/inputs/ping/ping.go
    125:	out, err := c.CombinedOutput()

    plugins/inputs/exec/exec.go
    91:	if err := cmd.Run(); err != nil {

    plugins/inputs/ipmi_sensor/command.go
    31:	err := cmd.Run()

    plugins/inputs/sysstat/sysstat.go
    194:	out, err := cmd.CombinedOutput()

    plugins/inputs/leofs/leofs.go
    185:	defer cmd.Wait()

    plugins/inputs/sysstat/sysstat.go
    282:	if err := cmd.Wait(); err != nil {

closes #1067
2016-04-29 12:06:22 -06:00
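A rough sketch of a CombinedOutput-with-timeout helper along the lines described above; the function name and exact behavior are assumptions, not the internal API that was added:

```go
package main

import (
	"bytes"
	"fmt"
	"os/exec"
	"time"
)

// combinedOutputTimeout runs cmd, capturing stdout and stderr, and kills the
// process if it does not finish within the given timeout.
func combinedOutputTimeout(cmd *exec.Cmd, timeout time.Duration) ([]byte, error) {
	var buf bytes.Buffer
	cmd.Stdout = &buf
	cmd.Stderr = &buf
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case <-time.After(timeout):
		cmd.Process.Kill() // best effort; Wait still returns via the channel
		return buf.Bytes(), fmt.Errorf("command timed out after %s", timeout)
	case err := <-done:
		return buf.Bytes(), err
	}
}

func main() {
	out, err := combinedOutputTimeout(exec.Command("sleep", "10"), 2*time.Second)
	fmt.Println(string(out), err)
}
```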
Cameron Sparr
cbe32c7482 Support default config paths
precedence will be:

 1. --config command-line option
 2. $TELEGRAF_CONFIG_PATH
 3. $HOME/.telegraf/telegraf.conf
 4. /etc/telegraf/telegraf.conf
2016-04-28 17:48:24 -06:00
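The stated precedence could look roughly like the following; the function names and home-directory lookup are assumptions for illustration, not the actual CLI code:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func fileExists(p string) bool {
	_, err := os.Stat(p)
	return err == nil
}

// resolveConfigPath applies the precedence listed in the commit above.
func resolveConfigPath(flagPath string) string {
	if flagPath != "" { // 1. --config command-line option
		return flagPath
	}
	if p := os.Getenv("TELEGRAF_CONFIG_PATH"); p != "" { // 2. environment variable
		return p
	}
	home, _ := os.UserHomeDir()
	if p := filepath.Join(home, ".telegraf", "telegraf.conf"); fileExists(p) { // 3. per-user config
		return p
	}
	return "/etc/telegraf/telegraf.conf" // 4. system-wide default
}

func main() {
	fmt.Println(resolveConfigPath(""))
}
```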
Cameron Sparr
5d3c582ecf changelog update 2016-04-28 17:34:37 -06:00
Cameron Sparr
3ed006d216 Sanitize invalid opentsdb characters
closes #1098
2016-04-28 17:01:50 -06:00
Cameron Sparr
3e1026286b skip network-dependent unit tests in short mode 2016-04-28 14:44:08 -06:00
Cameron Sparr
b59266249d README fixups for udp_listener, statsd inputs
closes #1119
2016-04-28 13:11:41 -06:00
G-regL
015261a524 Sanitize Field name
Replace '/[sS]ec' with '_persec' and spaces with underscores.

closes #1118
2016-04-28 12:21:28 -06:00
Adithya B Cherunilam
024e1088eb Ensure that the post-install script is compatible with RHEL 5
closes #1091
closes #1094
2016-04-28 11:58:06 -06:00
Cameron Sparr
08f4b1ae8a Update build to go 1.6.2 2016-04-28 11:41:16 -06:00
Bob Zoller
1390c22004 sanitize * to - in graphite serializer
closes #1110
2016-04-27 18:05:44 -06:00
Cameron Sparr
8742ead585 Change server_ -> jolokia_ in tags and other formatting 2016-04-27 16:00:58 -06:00
Cameron Sparr
59a297abe6 etc/telegraf.conf update 2016-04-27 15:46:21 -06:00
Simone Aiello
18636ea628 jolokia: always use POST
code refactor to use the same prepareRequest method
for both 'agent' and 'proxy' modes

closes #1031
closes #1050
closes #473
2016-04-27 15:45:37 -06:00
Simone Aiello
cf5980ace2 jolokia: add proxy mode 2016-04-27 15:39:55 -06:00
Jesse Hanley
a7b0861436 Adding Jobstats support to Lustre2 input plugin
Lustre Jobstats allows for RPCs to be tagged with a value, such
as a job's ID.  This allows for per job statistics. This plugin
collects statistics and tags the data with the jobid.

closes #1107
2016-04-27 15:35:24 -06:00
Cameron Sparr
89f2c0b0a4 Cassandra: update plugin supported prefix & fix panic
fixes #1102
2016-04-27 15:23:05 -06:00
Cameron Sparr
ee4f4d7800 ping plugin: Set default timeout 2016-04-27 15:08:38 -06:00
Cameron Sparr
4de75ce621 Performance refactor of running_output buffers
closes #914
closes #967
2016-04-27 13:40:05 -06:00
John Engelman
1c4043ab39 Closes #1085 - allow for specifying AWS credentials in config.
closes #1085
closes #1086
2016-04-26 17:24:05 -06:00
Cameron Sparr
44c945b9f5 Tail unit tests and README tweaks 2016-04-26 10:43:41 -06:00
Cameron Sparr
c7719ac365 buffers: fix bug when Write called before AddMetric 2016-04-26 10:25:04 -06:00
Cameron Sparr
b9c24189e4 Tail input plugin 2016-04-26 09:42:06 -06:00
Cameron Sparr
411d8d7439 Fix leaky tcp connections in phpfpm plugin
closes #1089
2016-04-26 09:24:32 -06:00
Cameron Sparr
671b40df2a procstat: field prefix fixup 2016-04-25 20:10:34 -06:00
Cameron Sparr
249a860c6f procstat: fix newlines in tags 2016-04-25 19:57:38 -06:00
Mika Eloranta
0367a39e1f postgresql_extensible: custom address in metrics output
Allow overriding the metrics "server" tag with the specified
value. Can be used to give a more user-friendly value for the server
name.

closes #1093
2016-04-25 16:33:35 -06:00
Mika Eloranta
1a7340bb02 postgresql_extensible: fix nil field values
nil field values would break the output InfluxDB line protocol.
Skip them from the output.
2016-04-25 16:33:33 -06:00
Mika Eloranta
ce7d852d22 postgresql_extensible: configurable measurement name
The output measurement name can be configured per query.
2016-04-25 16:33:33 -06:00
Hannu Valtonen
01b01c5969 postgresql_extensible: Censor also other security related conn params
While these aren't quite as sensitive as passwords, they do tend to be
long filesystem paths that shouldn't be reported along with
every measurement.
2016-04-25 16:33:33 -06:00
Pierre Fersing
c159460b2c Refactor running_output buffering
closes #1087
2016-04-25 16:32:41 -06:00
Cameron Sparr
07728d7425 Refactor globpath pkg to return a map
this is so that we don't call os.Stat twice for every file matched
by Match(). Also changing the behavior to _not_ return the name of a
file that doesn't exist if it's not a glob.
2016-04-24 14:37:44 -06:00
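A simplified sketch of a Match() that returns a map keyed by path, so each matched file is stat'ed exactly once; names here are illustrative, not the globpath package's real API:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// match returns matched paths mapped to their FileInfo. A non-glob pattern for
// a file that doesn't exist simply produces no entry.
func match(pattern string) map[string]os.FileInfo {
	out := make(map[string]os.FileInfo)
	matches, err := filepath.Glob(pattern)
	if err != nil {
		return out
	}
	for _, p := range matches {
		if info, err := os.Stat(p); err == nil {
			out[p] = info // stat exactly once per matched file
		}
	}
	return out
}

func main() {
	for p, info := range match("/var/log/*.log") {
		fmt.Println(p, info.Size())
	}
}
```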
Cameron Sparr
d3a25e4dc1 globpath refactor into pkg separate from filestat 2016-04-23 11:56:33 -06:00
zensqlmonitor
1751c35f69 SQL Server input. Fix datatype conversion. 2016-04-23 09:18:08 +02:00
zensqlmonitor
93f5b8cc4a Fix datatype conversion 2016-04-23 09:14:04 +02:00
Cameron Sparr
5b1e59a48c filestat plugin config fixup 2016-04-22 19:15:07 -06:00
Cameron Sparr
7b27cad1ba Dont specify AWS credential chain, use default
closes #1078
2016-04-22 11:43:20 -06:00
Cameron Sparr
1b083d63ab add gitattributes file 2016-04-22 11:42:22 -06:00
Cameron Sparr
23f2b47531 Ignore errors in systemd
closes #1022
2016-04-22 11:23:24 -06:00
Victor Garcia
194288c00e Adding replication lag metric
closes #1066
2016-04-22 11:07:32 -06:00
Cameron Sparr
f9c8ed0dc3 Add filestat plugin to README 2016-04-21 19:47:23 -06:00
Cameron Sparr
88def9b71b filestat input plugin
closes #929
2016-04-21 16:53:02 -06:00
Martin Gehrke
f818f44693 Added Network Interface Object block to Generic Queries examples in win_perf_counters/README.md
Network metrics are pretty important. This adds a block with a few counters to the Generic Queries examples in plugins/inputs/win_perf_counters/README.md, along with a link to the counter names for more.
2016-04-21 09:26:46 -04:00
Cameron Sparr
8a395fdb4a changelog update feature 1041 2016-04-20 18:36:14 -06:00
Cameron Sparr
c0588926b8 Add n_cpu field to system plugin
closes #1041
2016-04-20 18:22:04 -06:00
Cameron Sparr
f1b7ecb2a2 procstat: Add user, pidfile, pattern & exe tags
closes #1035
2016-04-20 13:18:07 -06:00
Cameron Sparr
4bcf157d88 Don't replace _ with . in datadog names
closes #1024
2016-04-20 09:06:13 -06:00
Cameron Sparr
2f7da03cce Do not log every tcp connect/disconnect
leaving as comments for whenever I rig up global debug logging.

closes #1062
2016-04-19 22:58:24 -06:00
Cameron Sparr
f1c995dcb8 Update etc/telegraf.conf 2016-04-19 18:01:41 -06:00
Cameron Sparr
9aec58c6b8 Don't allow inputs to overwrite host tag
closes #1054

This affects tags in the following plugins:

- cassandra
- disque
- rethinkdb
2016-04-19 17:44:33 -06:00
Victor Garcia
46aaaa9b70 Adding TTL metrics data
closes #1060
2016-04-19 17:02:25 -06:00
Larry Kim
46543d6323 Possible bug fix for oid_key collision
closes #1044
2016-04-19 17:00:04 -06:00
Cameron Sparr
a585119a67 Change prometheus doc to glob match 2016-04-19 14:46:37 -06:00
Cameron Sparr
8cc72368ca influxdb output: close connections & don't always overwrite
closes #1058
closes #1059

also see https://github.com/influxdata/influxdb/pull/6425
2016-04-19 13:40:08 -06:00
Cameron Sparr
92e57ee06c Set default tags in test accumulator
closes #1012
2016-04-18 19:24:17 -06:00
Eugene Chupriyanov
c737a19d9f Just close Riemann client on send metrics failure
Signed-off-by: Eugene Chupriyanov <e.chupriyanov@cpm.ru>

closes #1013
2016-04-18 17:25:36 -06:00
Eugene Chupriyanov
708a97d773 Try to reconnect to Riemann if metrics upload failed.
Signed-off-by: Eugene Chupriyanov <tchu@tchu.ru>

Error checks added

Don't Close() nil client

Signed-off-by: Eugene Chupriyanov <e.chupriyanov@cpm.ru>
2016-04-18 17:25:19 -06:00
Maksadbek
b95a90dbd6 updated README for mysql input plugin
closes #889
closes #403
2016-04-18 17:21:25 -06:00
Maksadbek
a2d1ee08d4 transposed the matrix of tags/fields for Lock Waits stats gathering 2016-04-18 17:11:26 -06:00
Maksadbek
7e64dc380f preventing tags from mutation by creating new tag for each metric 2016-04-18 17:11:26 -06:00
Maksadbek
046cb6a564 changed types to decrease needless uint64 to float64 casts 2016-04-18 17:11:26 -06:00
Maksadbek
644ce9edab fixed code regarding needless type casting; single creation of map 2016-04-18 17:11:26 -06:00
Maksadbek
059b601b13 mysql plugin conf field names are lowercase-underscored 2016-04-18 17:11:26 -06:00
Maksadbek
d59999f510 improvements on queries and additional comments 2016-04-18 17:11:26 -06:00
Maksadbek
c5d31e7527 statistics missing on MySQL 5.5 are turned off by default 2016-04-18 17:11:26 -06:00
Maksadbek
c121e38da6 mysql plugin, check for existence of table before scanning 2016-04-18 17:11:26 -06:00
Maksadbek
b16bc3d2e3 remove duplicate function; Mysql plugin GatherTableSchema is configurable 2016-04-18 17:11:26 -06:00
maksadbek
c732abbda2 Improved mysql plugin
- shows global variables
- shows slave statuses
- shows size and count of binary log files
- shows information_schema.processlist stats
- shows perf table stats
- shows auto increments stats from information schema
- shows perf index stats
- shows table lock waits summary by table
- shows time and operations of event waits
- shows file event statuses
- shows events statements stats from perf_schema
- shows schema statistics
- refactored plugin, provided multiple fields per insert
2016-04-18 17:11:26 -06:00
Cameron Sparr
61d681a7c8 docker changelog update 2016-04-18 16:24:32 -06:00
Cameron Sparr
7828bc09cf Fixup docker blkio container name & docker doc 2016-04-18 16:20:46 -06:00
Cameron Sparr
36d330fea0 docker plugin schema refactor
- renaming cont_name and cont_image to container_name and
container_image.
- cont_id is now a field, called container_id
- docker_cpu, docker_mem, docker_net measurements have been renamed to
  docker_container_cpu, docker_container_mem, and docker_container_net

closes #1014
closes #1052
2016-04-18 15:16:59 -06:00
Cameron Sparr
4d46589d39 JSON input: make string ignores clear 2016-04-18 13:20:06 -06:00
chaton78
93f57edd3a Better logging for MQTT consumer
closes #1023
closes #921
2016-04-18 11:27:50 -06:00
chaton78
8ec8ae0587 Added onConnection and connectionLost Handlers 2016-04-18 11:25:03 -06:00
Pascal Larin
ce94e636bb Resubscribe if not using persistent sessions 2016-04-18 11:25:03 -06:00
Pascal Larin
21c7378b61 Handle onConnect 2016-04-18 11:25:03 -06:00
Thibault Cohen
75a9845d20 SNMP Fix #995
closes #995
2016-04-18 11:19:16 -06:00
Shahzheeb Khan
d638f6e411 mongodb readme and examples

closes #1039
2016-04-16 15:53:04 -06:00
Cameron Sparr
81d0a64d46 Adds support for removing/keeping tags from metrics
closes #706
2016-04-16 15:13:38 -06:00
Cameron Sparr
f76739cb1b Release 0.12.1 2016-04-14 16:16:26 -06:00
Ross McDonald
7f992fd321 Changed nohup fallback command to use 'sudo -u' so that Telegraf doesn't run as the root user. 2016-04-14 13:56:20 -06:00
Cameron Sparr
e428d11add Update etc/telegraf.conf, README 2016-04-12 14:50:56 -06:00
Subhachandra Chandra
0ed5b75a14 Fixed CHANGELOG with the correct pull request id.
closes #681
closes #1009
2016-04-12 10:57:34 -06:00
subhachandrachandra
b1b4adec74 Added cassandra plugin to access metrics using jolokia and push them to influxdb. 2016-04-12 10:55:59 -06:00
Cameron Sparr
1934cc2e62 Add memstats to the influxdb input plugin
closes #958
2016-04-12 10:13:11 -06:00
Lukasz Jagiello
ae8cf8c35e Fix plugin name in README 2016-04-11 19:43:30 -06:00
Cameron Sparr
f5878eafb9 Create a template system for the graphite serializer
closes #925
closes #879
2016-04-11 16:30:18 -06:00
Cameron Sparr
27fe4f7062 Update changelog, etc/telegraf.conf
closes #998
2016-04-08 12:00:41 -06:00
Michele Fadda
7ad8b26297 dovecot: enabled global, user and ip queries 2016-04-08 11:56:08 -06:00
Cameron Sparr
1a383b7d90 Telegraf no longer depends on lsof
so remove it as a dependency from the linux packages.

closes #974
2016-04-08 11:27:33 -06:00
Shahzheeb Khan
445946792e Add a few metrics examples to the jolokia plugin

closes #993
2016-04-08 11:20:47 -06:00
Shahzheeb Khan
de82c7d5ac Add a few metrics examples
Add more metrics to the example to make the plugin easier to understand
2016-04-08 11:20:42 -06:00
Cameron Sparr
07f0d561dc Eliminate byte buffer, copy scanner.Bytes directly 2016-04-08 09:50:03 -06:00
Cameron Sparr
be379f3dac Refactor UDP & TCP input buffers
closes #991
2016-04-08 09:50:03 -06:00
Cameron Sparr
1bf904fe60 Godeps: update paho mqtt client dep
this might fix #921

see https://github.com/eclipse/paho.mqtt.golang/issues/32
2016-04-07 17:32:28 -06:00
Cameron Sparr
c6faf005cb Add sysstat dummy file for non-linux builds 2016-04-07 12:08:26 -06:00
Rene Zbinden
b534b58542 fix tests
closes #939
2016-04-07 11:54:41 -06:00
Rene Zbinden
920711533e move path lookup for sadf to init() 2016-04-07 10:45:20 -06:00
Rene Zbinden
194110433e change sadf options so that it also works on older linux distributions 2016-04-07 10:45:20 -06:00
Rene Zbinden
7926396d2a cleanup code, set dfltActivities in the init() function; this removes one if from the collect() method 2016-04-07 10:45:20 -06:00
Rene Zbinden
797522e8ca change group=true by default 2016-04-07 10:45:20 -06:00
Rene Zbinden
64b5d1a269 add documentation about sadc path on different linux distributions 2016-04-07 10:45:20 -06:00
Rene Zbinden
d00d3802c9 fix build tags 2016-04-07 10:45:20 -06:00
Rene Zbinden
46fff13341 disable TestInterval with -race test option 2016-04-07 10:45:20 -06:00
Rene Zbinden
be3374a3ef remove interval configuration 2016-04-07 10:45:20 -06:00
Rene Zbinden
264ac0b017 fix race condition 2016-04-07 10:45:20 -06:00
Rene Zbinden
17033b3c6c change readme 2016-04-07 10:45:20 -06:00
Rene Zbinden
c4ea122d66 add sysstat plugin 2016-04-07 10:45:20 -06:00
Cameron Sparr
90185dc6b3 cleanup & comment http_response def config
closes #332
2016-04-07 10:37:52 -06:00
Luke Swithenbank
377b030d88 update to 5 second default and string map for headers 2016-04-07 10:28:39 -06:00
Luke Swithenbank
437bd87d7c added tests and did some refactoring 2016-04-07 10:28:39 -06:00
Luke Swithenbank
73a7916ce3 take a request body as a param 2016-04-07 10:28:39 -06:00
Luke Swithenbank
f947fa86e3 update to allow for following redirects 2016-04-07 10:28:39 -06:00
Luke Swithenbank
7219efbdb7 add the ability to parse http headers 2016-04-07 10:28:39 -06:00
Luke Swithenbank
b7435b9cd1 fmt 2016-04-07 10:28:39 -06:00
Luke Swithenbank
207ab5a0d1 update to make a working sample_config 2016-04-07 10:28:39 -06:00
Luke Swithenbank
70aa0ef85d add plugin to all 2016-04-07 10:28:39 -06:00
Luke Swithenbank
dfbe231a51 add http_response plugin 2016-04-07 10:28:39 -06:00
Cameron Sparr
cce35da366 Godeps_windows: update file 2016-04-07 10:00:10 -06:00
Cameron Sparr
1a612bcae9 Update README and etc/telegraf.conf 2016-04-06 13:49:50 -06:00
Josh Hardy
d5b9e003fe Add CloudWatch input plugin
Rebased commit of previously reviewed branch.
Added cloudwatch client Mock and more rich unit tests.

closes #935
closes #936
2016-04-06 13:45:06 -06:00
Sergio Jimenez
e19c474a92 input(docker): Cleanup
* Removed leftovers, unused code

closes #957
fixes #645
2016-04-06 12:01:36 -06:00
Sergio Jimenez
8274798499 fix(Godeps): Added github.com/opencontainers/runc 2016-04-06 11:55:30 -06:00
Sergio Jimenez
9f68a32934 fix(): Last link on README 2016-04-06 11:55:30 -06:00
Sergio Jimenez
5c688daff1 input(docker): Updated README
* Replaced links to fsouza/go-dockerclient with docker/engine-api
2016-04-06 11:55:30 -06:00
Sergio Jimenez
741fb1181f godeps(): Updated Godeps file for engine-api
* Added required deps for engine-api
* Removed fsouza/go-dockerclient
2016-04-06 11:55:30 -06:00
Sergio Jimenez
fd1f05c8e0 input(docker): Fixed io sectors/io_time recursive
* On engine-api sectors_recursive and io_time_recursive have no Op
2016-04-06 11:55:30 -06:00
Sergio Jimenez
708cbf937f input(docker): Fixed tests to work with engine-api
* Modified tests to work with engine-api
2016-04-06 11:55:30 -06:00
Sergio Jimenez
32213cad01 input(docker): docker/engine-api
* Made required changes to get it to compile
* First manual tests looking good, still unit tests need fixing
* Made go linter happier
2016-04-06 11:55:30 -06:00
Ricard Clau
9320a6e115 windows service docs
closes #954
2016-04-06 11:50:36 -06:00
Cameron Sparr
0f16c0f4cf Reduce TCP listener allocations 2016-04-05 17:37:09 -06:00
Cameron Sparr
30464396d9 Make the UDP input buffer only once 2016-04-05 17:35:43 -06:00
Cameron Sparr
64066c4ea8 Update input data format readme 2016-04-05 17:27:04 -06:00
Cameron Sparr
7e97787d9d More readme fixups 2016-04-05 16:17:45 -06:00
Cameron Sparr
40f2dd8c6c Readme fixup for exec plugin 2016-04-05 15:22:58 -06:00
Cameron Sparr
4dd364e1c3 Update all readme instances of data formats 2016-04-05 14:42:20 -06:00
Cameron Sparr
03f2a35b31 Update jolokia plugin readme 2016-04-05 13:54:02 -06:00
Martti Rannanjärvi
73bd98df57 dovecot: remove extra newline from stats query
Extra newline in the stats query is interpreted as an empty query
which is an error for dovecot.

closes #972
2016-04-05 10:54:27 -06:00
Armin Wolfermann
bcf1fc658d ipmi_sensors: Allow : in password
closes #969
2016-04-05 10:52:41 -06:00
Cameron Sparr
863cbe512d processes plugin: fix case where there are spaces in cmd name
fixes #968
2016-04-05 10:27:30 -06:00
Cameron Sparr
d871e9aee7 Dummy kernel plugin added for consistent config generation 2016-04-04 17:43:53 -06:00
Cameron Sparr
70ef61ac6d Release 0.12 2016-04-04 16:34:41 -06:00
Cameron Sparr
a4a140bfad etc/telegraf.conf update for procstat change 2016-04-04 16:30:24 -06:00
Florent Ramière
d2d91e713a Add plugin links
closes #964
2016-04-04 16:28:36 -06:00
Cameron Sparr
d9bb1ceaec Changelog update 2016-04-04 16:12:50 -06:00
Pierre Fersing
5fe8903fd2 Use timeout smaller than 10 seconds
closes #959
2016-04-04 16:10:23 -06:00
Cameron Sparr
8509101b83 drop cpu_time_* from procstat by default
closes #963
2016-04-04 16:10:09 -06:00
Cameron Sparr
357849c348 Godeps: update wvanbergen/kafka dependency
see https://github.com/wvanbergen/kafka/pull/87

fixes #805
2016-04-02 13:45:24 -06:00
Nikhil Bafna
0f1b4e06f5 Update README.md
Fix redis input plugin name in configuration example
2016-04-02 10:13:21 +05:30
Cameron Sparr
8e041420cd config: parse environment variables in the config file
closes #663
2016-04-01 16:06:19 -06:00
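A minimal sketch of the idea, assuming the raw config contents are run through environment-variable expansion before TOML parsing; os.ExpandEnv is an illustrative choice here, not necessarily what the config loader uses:

```go
package main

import (
	"fmt"
	"os"
)

// expandConfig replaces $VAR and ${VAR} references in the config file
// contents with their environment values before the file is parsed.
func expandConfig(contents []byte) []byte {
	return []byte(os.ExpandEnv(string(contents)))
}

func main() {
	os.Setenv("INFLUX_URL", "http://localhost:8086")
	fmt.Println(string(expandConfig([]byte(`urls = ["$INFLUX_URL"]`))))
}
```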
Rubycut
9211d22b2b Add writing in documentation.
closes #950
2016-04-01 11:46:32 -06:00
Cameron Sparr
f5246eb167 Update changelog with config file PR 2016-04-01 11:45:09 -06:00
Cameron Sparr
e436b2d720 Cleanup & standardize config file
changes:

- -sample-config will now comment out all but a few default plugins.
- config file parse errors will output path to bad conf file.
- cleanup 80-char line-length and some other style issues.
- default package conf file will now have all plugins, but commented
  out.

closes #199
closes #944
2016-04-01 10:59:53 -06:00
Florent Ramière
51f4e9c0d3 Update changelog
closes #945
2016-04-01 10:09:04 -06:00
Florent Ramière
8c3371c4ac Use numerical codes instead of symbolic ones 2016-04-01 10:08:55 -06:00
Florent Ramière
6ff0fc6d83 Add compression/acks/retry conf to Kafka output plugin
The following configuration is now possible

	## CompressionCodec represents the various compression codecs
	## recognized by Kafka in messages.
	##  "none" : No compression
	##  "gzip" : Gzip compression
	##  "snappy" : Snappy compression
	# compression_codec = "none"

	##  RequiredAcks is used in Produce Requests to tell the broker how
	##  many replica acknowledgements it must see before responding
	##  "none" : the producer never waits for an acknowledgement from the
	##  broker. This option provides the lowest latency but the weakest
	##  durability guarantees (some data will be lost when a server fails).
	##  "leader" : the producer gets an acknowledgement after the leader
	##  replica has received the data. This option provides better durability
	##  as the client waits until the server acknowledges the request as
	##  successful (only messages that were written to the now-dead leader but
	##  not yet replicated will be lost).
	##  "leader_and_replicas" : the producer gets an acknowledgement after
	##  all in-sync replicas have received the data. This option provides the
	##  best durability, we guarantee that no messages will be lost as long as
	##  at least one in sync replica remains.
	# required_acks = "leader_and_replicas"

	##  The total number of times to retry sending a message
	# max_retry = "3"
2016-04-01 10:08:55 -06:00
Cameron Sparr
9347a70425 Fix httpjson README
closes #947
2016-03-31 20:37:04 -06:00
Cameron Sparr
91957f0848 Update Godeps_windows file to HEAD 2016-03-30 14:43:05 -06:00
Cameron Sparr
62105bb353 Use github paho mqtt client instead of gerrit
this might fix #921
2016-03-30 11:54:01 -06:00
Rudenkovk Konstantin
e03f684508 Fix parse fcgi URI path in php-fpm input module
closes #934
2016-03-30 10:34:48 -06:00
Ross McDonald
2f41ae24f8 Swap systemd command, as it was causing issues on Debian. 2016-03-30 10:17:31 -06:00
Cameron Sparr
4ad551be9a add '*' to metric prefixes for consistency 2016-03-29 17:00:51 -06:00
Cameron Sparr
bd640ae2c5 changelog fixup 2016-03-29 12:19:07 -06:00
Cameron Sparr
2cfc882c62 changelog & readme update 2016-03-29 12:18:23 -06:00
Cameron Sparr
21ece2d76d Convert ipmi stats/tags to underscore and lowercase
closes #888
2016-03-29 11:39:57 -06:00
张光权
d055d7f496 Add the ipmi plugin 2016-03-29 11:39:57 -06:00
Cameron Sparr
b1cfb1afe4 Deprecate statsd convert_names option, expose separator
closes #876
2016-03-28 12:13:15 -06:00
Cameron Sparr
2f215356d6 Update statsd graphite parser link to telegraf version 2016-03-28 11:57:51 -06:00
Adam Argo
e07c79259b PR feedback changes
closes #927
2016-03-28 10:43:34 -06:00
Adam Argo
59085f072a adds ability to parse datadog-formatted tags in the statsd input 2016-03-28 10:43:26 -06:00
Cameron Sparr
474d6db42f Don't log every string metric that prometheus doesn't support 2016-03-23 16:01:06 -06:00
Thibault Cohen
a95710ed0c SNMP plugin fixes
fixes #873
2016-03-22 22:58:02 -06:00
JP
51d7724255 add verifyValue func for datadog and librato, bail if no good
closes #906
2016-03-22 15:22:57 -06:00
Cameron Sparr
276e7629bd memcached unix socket: fix panic. Do not recreate conn inside if
closes #841
2016-03-22 15:12:35 -06:00
Cameron Sparr
69606a45e0 Fix prometheus label names, and don't panic if invalid
fixes #907
2016-03-22 12:29:55 -06:00
Chris Goller
7f65ffcb15 Add optional parameters to influxdb output README 2016-03-22 09:14:25 -06:00
Cameron Sparr
4f5f6761f3 Update gopsutil dependency
closes #656
2016-03-22 09:13:31 -06:00
Cameron Sparr
f543dbb42f Allow users to tell telegraf Agent not to include host tag
closes #848
2016-03-21 15:51:10 -06:00
Cameron Sparr
5917a42997 influxdb output: quote the database name
closes #898
2016-03-21 14:37:33 -06:00
Cameron Sparr
fbe1664214 README cleanup and update 2016-03-21 14:30:59 -06:00
david birdsong
d09bb13cb6 special case 'value'
it usually connotes a single-value metric; appending it just adds clutter

closes #793
2016-03-21 13:49:34 -06:00
david birdsong
31c323c097 fix prometheus output
If I understand the Prometheus data model correctly, the current output
for this plugin is unusable.

Prometheus only accepts a single value per measurement. Prior to this change, the range loop
caused a measurement to end up with a random value.

For instance:

net,dc=sjc1,grp_dashboard=1,grp_home=1,grp_hwy_fetcher=1,grp_web_admin=1,host=sjc1-b4-8,hw=app,interface=docker0,state=live bytes_recv=477596i,bytes_sent=152963303i,drop_in=0i,drop_out=0i,err_in=0i,err_out=0i,packets_recv=7231i,packets_sent=11460i 1457121990003778992

This 'net' measurement would have all its tags copied to Prometheus
labels, but any of 152963303, or 0, or 7231 as the value for
'net', depending on which field is last in the map iteration.

This change expands the fields into new measurements by appending
the field name to the InfluxDB measurement name.

I.e., the above example results in 'net' being dropped and new measurements
taking its place:
	net_bytes_recv
	net_bytes_sent
	net_drop_in
	net_err_in
	net_packets_recv
	net_packets_sent

I hope this can be merged; I love telegraf's composability of tags and
filtering.
2016-03-21 13:49:09 -06:00
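The expansion described above, one multi-field metric becoming one measurement per field, can be sketched as follows; plain maps stand in for telegraf.Metric and the names are illustrative assumptions:

```go
package main

import "fmt"

// expandFields turns a multi-field metric into single-value series by
// appending each field name to the measurement name (e.g. net -> net_bytes_recv).
func expandFields(measurement string, fields map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{})
	for name, value := range fields {
		out[measurement+"_"+name] = value
	}
	return out
}

func main() {
	fields := map[string]interface{}{"bytes_recv": 477596, "bytes_sent": 152963303}
	for name, value := range expandFields("net", fields) {
		fmt.Println(name, value)
	}
}
```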
Thibault Cohen
8f09aadfdf Add nagios parser for exec input plugin
closes #762
2016-03-21 13:34:47 -06:00
Chris H (CruftMaster)
20b4e8c779 GREEDY field templates for the graphite parser, and support for multiple specific field names
closes #789
2016-03-21 13:32:51 -06:00
Cameron Sparr
402a0108ae Merge pull request #896 from jipperinbham/graphite-tag-sanitizer
sanitize known issue characters from graphite tag name
2016-03-21 12:29:05 -06:00
Cameron Sparr
9de4a8efcf Update readme, changelog for couchbase plugin
closes #866
closes #482
2016-03-21 12:12:23 -06:00
Vebjorn Ljosa
077fa2e6b9 Improve README for couchbase input plugin
Proper terminology and case. Examples for tags. Example output.
2016-03-21 12:09:32 -06:00
Vebjorn Ljosa
2ae9316f48 Add examples in documentation for couchbase input plugin 2016-03-21 12:09:32 -06:00
Vebjorn Ljosa
9b5a90e3b9 Unit test couchbase input plugin 2016-03-21 12:09:32 -06:00
Vebjorn Ljosa
483942dc41 Comment on default pool name 2016-03-21 12:09:32 -06:00
Vebjorn Ljosa
2ddda6457f Convert measurement names to snake_case 2016-03-21 12:09:32 -06:00
Vebjorn Ljosa
681e695170 Don't copy lock when rangeing over map
Make `go vet` happy.
2016-03-21 12:09:32 -06:00
Vebjorn Ljosa
a043664dc4 Couchbase input plugin 2016-03-21 12:09:32 -06:00
JP
e940f99646 sanitize known issue characters from graphite tag name 2016-03-21 10:01:51 -05:00
Cameron Sparr
22073042a9 Merge pull request #891 from jipperinbham/librato-serialize-fix
replace @ character with - for librato
2016-03-18 17:00:00 -06:00
Cameron Sparr
2634cc408a Update README 2016-03-18 11:26:05 -06:00
Thomas Menard
36446bcbc2 Remove the columns used as tag
closes #844
2016-03-18 11:25:04 -06:00
Thomas Menard
b371ec5cf6 Add the postgresql_extensible plugin
This plugin is intended to add an extended support of Postgresql
compared to the legacy postgres plugin.

Basically, the plugin doesn't have any metrics defined and it's up to the
user to define them in telegraf.conf (as a TOML structure).

Each query can have its own tags, and can be written specifically
using a where clause in order to optionally filter per database name.

To be more generic, a minimum PostgreSQL version has been defined per
query in case you have two different versions of PostgreSQL running on the
same host.
2016-03-18 11:23:02 -06:00
HUANG Wei
18f4afb388 Inherit previous instance's stats in statsd plugin.
This way, after a reload, the stats won't start over again, at least for the
counter type.

closes #887
2016-03-18 11:20:35 -06:00
Cameron Sparr
77dcbe95c0 Do not write metrics if there are 0 to write
closes #884
2016-03-18 10:54:51 -06:00
Cameron Sparr
061b749041 TLS config: if only given ssl_ca, create tls config anyways
fixes #890
2016-03-18 10:53:55 -06:00
JP
5b0c3951f6 replace @ character with - for librato 2016-03-18 11:25:51 -05:00
Cameron Sparr
f2394b5a8d Merge pull request #886 from entertainyou/typo
Fix typo, should be input instead of output.
2016-03-17 21:36:26 -06:00
Cameron Sparr
fe7b884cc9 Update changelog 2016-03-17 20:40:22 -06:00
Cameron Sparr
5c1b635229 Value parser, for parsing a single value into a metric
closes #849
2016-03-17 20:08:21 -06:00
HUANG Wei
63410491b7 Fix typo, should be input instead of output. 2016-03-18 10:06:44 +08:00
Cameron Sparr
26e0a4bbde Merge pull request #882 from VasuBalakrishnan/master
Fixed SQL Server Plugin issues #881
2016-03-17 19:41:42 -06:00
Balakrishnan
c356e56522 Updated Change log #881 2016-03-17 19:56:39 -04:00
Cameron Sparr
fd26bbbd0b Merge pull request #883 from ljagiello/minor-changelog-fix
Duplicated line
2016-03-17 17:09:08 -06:00
Lukasz Jagiello
7aa55371b5 Duplicate line 2016-03-17 15:54:22 -07:00
Balakrishnan
ba06533c3e Fixed SQL Server Plugin issues #881 2016-03-17 18:01:19 -04:00
Marcelo Salazar
d66d66e74b added json serializer
closes #878
2016-03-17 13:51:16 -06:00
Jonathan Chauncey
d6b5f3efe6 fix(prometheus): Add support for bearer token to prometheus input plugin
closes #864
merges #880
2016-03-17 13:47:22 -06:00
Cameron Sparr
b5a431624b Close UDP listener in udp_listener plugin
also adding waitgroups to udp_listener and statsd plugins to verify that
all goroutines have been cleaned up before Stop() exits.

closes #869
2016-03-17 10:51:35 -06:00
HUANG Wei
8e7284de5a fixup! Close the UDP connection in Stop() of statsd input plugin. 2016-03-17 10:51:35 -06:00
HUANG Wei
b2d38cd31c Close the UDP connection in Stop() of statsd input plugin.
Otherwise, when reloading, we may try to listen on the same port and get an
error that the address is already in use.
2016-03-17 10:51:35 -06:00
Cameron Sparr
a15fed35b7 Merge pull request #875 from Onefootball/feature/link-freebsd-package
Add FreeBSD tarball location to README
2016-03-17 10:51:21 -06:00
Dirk Pahl
eee6b0059c Add FreeBSD tarball location to README 2016-03-17 16:53:55 +01:00
Eugene Dementiev
530b4f3bee [amqp output] Allow external auth (cert-based tls auth)
closes #863
2016-03-16 19:03:41 -06:00
Thibault Cohen
bac1c223de Improve prometheus plugin
closes #707
2016-03-16 19:00:06 -06:00
marknmel
57f7582b4d Cleanup of Exec Inputs documentation - redux
Hi @sparrc

(Sorry for the noise - new pr)

closes #853

Please find some improvements to readability including the \n for the exec/telegraf line-protocol input.

I hope you (and others) find it easier to read.

/Mark

This is an amendment
2016-03-16 18:55:48 -06:00
Cameron Sparr
5afe819ebd Changelog update for 0.11.1 2016-03-16 14:55:40 -06:00
Cameron Sparr
59568f5311 Release 0.11.1 2016-03-16 14:45:35 -06:00
Cameron Sparr
822706367b provide args for telegraf for consistency with influxd:
- telegraf version
- telegraf config

closes #857
2016-03-16 14:22:01 -06:00
HUANG Wei
f8e9fafda3 Add reload configuration for telegraf service scripts.
closes #794
2016-03-16 11:20:46 -06:00
Cameron Sparr
c2bb9db012 Changelog update 2016-03-16 11:20:28 -06:00
Pierre Fersing
e4e7d7fbfc Improved install script for packaged telegraf:
* Start/stop service on Debian/Ubuntu
* Disable init-script/Systemd-unit on package removal

closes #747
2016-03-16 11:17:28 -06:00
Cameron Sparr
035e4cf90a Fix bug with httpjson client pointer receiver
fixes #859
2016-03-16 10:57:15 -06:00
Cameron Sparr
18fff4a3f5 Merge pull request #858 from LordFPL/patch-1
(very) Little error in changelog
2016-03-16 09:28:30 -06:00
Ross McDonald
675b6dc305 Corrected issue with Windows builds so that the correct configuration
and filesystem are used.

closes #852
closes #854
2016-03-16 09:27:09 -06:00
LordFPL
4071c78b2b (very) Little error in changelog
We're not going back in time, are we? ;)
2016-03-16 08:57:33 +01:00
Cameron Sparr
2cb32a683e README fixes for 0.11.0 2016-03-15 11:19:40 +00:00
Cameron Sparr
b6dc9c004b Release 0.11.0 2016-03-14 17:19:46 +00:00
Cameron Sparr
4ea0c707c1 Input plugin for running ntp queries
see #235
2016-03-14 16:53:37 +00:00
Thomas Menard
2fbcb5c6d8 Fix postgresql password exposure in metrics
Fix the password exposure in the metrics or tags.

closes #821
closes #845
2016-03-14 11:00:30 +00:00
Cameron Sparr
a4d60d9750 Update Godeps_windows
closes #839
2016-03-14 10:49:28 +00:00
Cameron Sparr
d3925890b1 github wh: return from eventHandler when err != nil
closes #837
2016-03-14 10:34:58 +00:00
Cameron Sparr
8c6c144f28 influxdb output: If all write fails, trigger a reconnect
closes #836
2016-03-14 10:28:01 +00:00
Cameron Sparr
db8c24cc7b Add a "kernel" plugin for /proc/stat statistics
see #235
2016-03-11 14:50:45 +01:00
Thibault Cohen
ecbbb8426f Fix #828
closes #828
closes #829
2016-03-11 12:21:29 +01:00
Thibault Cohen
0752879fc8 SNMP fix concurrency issue
closes #823
2016-03-10 12:04:29 +01:00
Chris Goller
3f2a04b25b Fix build-for-docker Makefile target syntax.
closes #819
2016-03-09 22:58:29 +01:00
Cameron Sparr
aa15e7916e processes: Fix zombie process procfs panic
fixes #822
2016-03-09 22:55:26 +01:00
Cameron Sparr
7b09623fa8 Add number of users to 'system' plugin
see #235
2016-03-09 19:27:22 +01:00
Cameron Sparr
2f45b8b7f5 Cross platform support for the 'processes' plugin
closes #798
2016-03-09 15:47:37 +01:00
Thibault Cohen
5ffa2a30be Add processes status stats in system input plugin 2016-03-09 15:47:37 +01:00
Cameron Sparr
b102ae141a CONFIGURATION drop->fielddrop 2016-03-09 15:46:37 +01:00
Cameron Sparr
845abcdd77 Only log the overwritten metric warning on 1st overwrite per buffer
see #807
2016-03-09 14:44:32 +01:00
Cameron Sparr
805db7ca50 Break out fcgi code into orig Go files, don't ignore errs
closes #816
2016-03-09 13:44:11 +01:00
Prune Sebastien THOMAS
bd3d0c330f parsed with gofmt
closes #776
2016-03-07 18:48:02 +01:00
Prune Sebastien THOMAS
0060df9877 added zookeeper_chroot option
added a plugin option zookeeper_chroot to set up the kafka endpoint in zookeeper, which may not be / (the default).
This chroot is then configured in the consumergroup config.Zookeeper.Chroot

This works around the fact that this plugin does not handle URLs like "zookeeper_server:port/chroot".
Since the peers are stored in an array, it makes no sense for them to be URLs. Peers should all be members of the same cluster, so they all have the same chroot.
2016-03-07 18:46:10 +01:00
Thibault Cohen
cd66e203bd Improve procstat
closes #799
2016-03-07 17:57:32 +01:00
Cameron Sparr
240f99478a Prevent Inf and NaN from being added, and unit test Accumulator
closes #803
2016-03-07 15:46:23 +01:00
Cameron Sparr
41534c73f0 mqtt_consumer: option to set persistent session and client ID
closes #797
2016-03-07 14:34:42 +01:00
Matt Morrison
6139a69fa8 [SNMP Input] SNMPMap() loops forever if table has more than 32 entries
closes #800
closes #801
2016-03-07 12:18:55 +01:00
Cameron Sparr
3cca312e61 Adding a TCP input listener
closes #481
2016-03-07 12:10:28 +01:00
Cameron Sparr
7e312797ec Grammar corrections and consistency for output-list, input-list
closes #788
2016-03-07 11:42:01 +01:00
张光权
fe44fa648a Fix the incorrect indent of input-list help message 2016-03-07 11:33:03 +01:00
张光权
3249030257 add flags '-input-list' and '-output-list' for telegraf command 2016-03-07 11:33:03 +01:00
张光权
8f98c20c51 Add flags -usage-list to print all plugins inputs for telegraf 2016-03-07 11:33:03 +01:00
Thibault Cohen
1c76d5d096 Improve docker input plugin
closes #754
2016-03-07 11:24:32 +01:00
Cameron Sparr
35f1e28809 Merge pull request #790 from arthtux/master
Add README.md for redis
2016-03-04 17:46:12 +00:00
Arthur Deschamps
20999979de Update redis.go 2016-03-04 07:22:54 -05:00
arthtux
c6706a86f1 add README.md for redis 2016-03-03 20:20:03 -05:00
Ross McDonald
b4b1866286 Removed test functionality from build script.
closes #708
closes #713
2016-03-03 21:37:35 +00:00
Ross McDonald
28eb9b4c29 Fixed issue where binary wasn't copied to packaging directory correctly. 2016-03-03 21:34:57 +00:00
Ross McDonald
0a9accccc1 Added permissions check to post-install script due to issues with RPMs having the incorrect permissions on the log directory. 2016-03-03 21:34:57 +00:00
Ross McDonald
c3d220175f Removed i386 as a target for darwin, as it currently doesn't compile. 2016-03-03 21:34:57 +00:00
Ross McDonald
095c90ad22 Re-added zip package output format. Modified zip and tar packaging process to use the base 'tar' and 'zip' commands, instead of 'fpm'. 2016-03-03 21:34:57 +00:00
Ross McDonald
a77bfecb02 Updates to build script to improve ARM builds and other functionality. 2016-03-03 21:34:57 +00:00
Thibault Cohen
72027b5b3c Add README.md for snmp input plugin
closes #735
closes #773
closes #540
2016-03-03 15:54:34 +00:00
Thibault Cohen
e5503c56ad Fix #773 2016-03-03 15:50:36 +00:00
Thibault Cohen
ee7b225272 Add snmp table feature #540 2016-03-03 15:50:36 +00:00
Cameron Sparr
03d37725a9 dns_query unit tests, require that field exists 2016-03-03 15:44:16 +00:00
Pierre Fersing
29d1cbb673 Reduce metric_buffer_limit to 1000
closes #780
2016-03-03 15:32:44 +00:00
Auke Willem Oosterhoff
e81278b800 Fix bug in sample code.
closes #784
closes #785
2016-03-03 15:31:37 +00:00
Manuel Sangoi
e5482a5725 Do not ignore username option for mqtt output 2016-03-03 15:27:37 +00:00
Pascal Larin
8464be691e Username not set for mqtt_consumer plugin
The username parameter for the mqtt_consumer plugin was not passed to the client because of an incorrect empty check.

closes #781
2016-03-03 12:17:19 +00:00
Cameron Sparr
ed9937bbd8 Update all dependency hashes 2016-03-02 10:23:01 +00:00
Cameron Sparr
b2a4d4a018 Allow ssl option specification for httpjson plugin
closes #769
2016-03-01 18:17:19 +00:00
Cameron Sparr
74aaf4f75b Add udp listener to readme list of plugins 2016-03-01 15:46:29 +00:00
Cameron Sparr
2945f9daa9 Changelog update 2016-03-01 15:11:37 +00:00
Cameron Sparr
3b496ab3d8 udp listener: add os buffer size notes & change default port
- using 8092 as the default port because it's similar to the rest of
  the TICK stack (InfluxDB, for example, uses 8083, 8086, 8088, etc.).
  didn't want to use 8125 because that conflicts with statsd.

closes #758
2016-03-01 15:01:07 +00:00
Andrea Leopardi
e1f30aeff9 Add a README for the UDP listener input plugin 2016-03-01 11:24:49 +01:00
Andrea Leopardi
a92e73231d Add tests for the udp_listener input plugin 2016-03-01 11:24:49 +01:00
Aleksei Magusev and Andrea Leopardi
8d91115623 Add generic UDP listener service input 2016-03-01 11:24:49 +01:00
Cameron Sparr
9af8d6912a Remove naoina/toml dependency, use influxdata/toml
closes #745
2016-03-01 10:17:02 +00:00
Pierre Fersing
fe43fb47e1 Fix test
closes #771
2016-03-01 09:44:52 +00:00
Pierre Fersing
ca3a80fbe1 Fix invalid DSN after dsnAddTimeout and "" DSN 2016-03-01 09:43:28 +00:00
Pierre Fersing
f0747e76da Fix newly added test 2016-03-01 09:43:28 +00:00
Pierre Fersing
7416d6ea71 Improve timeout in input plugins 2016-03-01 09:43:28 +00:00
Dirk Pahl
ea7cbc781e Create a FreeBSD build
closes #766
2016-03-01 09:38:58 +00:00
Cameron Sparr
3568fb9f93 Support specifying influxdb retention policy
closes #692
2016-02-29 18:10:32 +00:00
Cameron Sparr
43b7ce4f6d Merge pull request #764 from arthtux/master
Readme for nginx plugin
2016-02-29 11:36:32 +00:00
bastard
baa38d6266 Fixing Librato plugin
closes #722
2016-02-29 11:35:45 +00:00
arthtux
1677960caa correct nginx README 2016-02-28 15:41:16 -05:00
arthtux
0fab573c98 add nginx description 2016-02-28 15:38:46 -05:00
Cameron Sparr
04a8e5b888 influxdb output: try to connect on write if there are no conns 2016-02-26 16:26:43 +00:00
Cameron Sparr
6284e2011c Fix sensor plugin, was splitting on ":" incorrectly
closes #748
2016-02-26 15:21:05 +00:00
Cameron Sparr
a97c93abe4 add usage_percent into docker readme
closes #726
2016-02-26 15:12:37 +00:00
Cameron Sparr
664816383a Update readme to 0.10.4.1 2016-02-24 10:05:10 -07:00
Cameron Sparr
fc4cb1654c Fix deb and rpm packages
closes #752
closes #750
2016-02-24 09:12:14 -07:00
Cameron Sparr
f1fa915985 Release 0.10.4 w/ windows builds 2016-02-23 16:00:39 -07:00
Cameron Sparr
11482a75a1 Changelog update, field and name drop and pass params 2016-02-23 15:47:31 -07:00
Matt Heath
e983d35c25 Add support for multiple field names for timers
closes #737
2016-02-23 15:43:29 -07:00
Cameron Sparr
85c4f753ad modify Windows default conf to use win perf over WMI 2016-02-23 15:40:02 -07:00
Cameron Sparr
1847ce3f3d Experimental windows build process changes 2016-02-23 13:52:24 -07:00
Cameron Sparr
83c27cc7b1 dns query: Don't use mjasion.pl for unit tests, check errs 2016-02-23 12:30:18 -07:00
Cameron Sparr
3e8f96a463 httpjson: add unit test to verify that POST params get passed 2016-02-23 11:01:23 -07:00
Cameron Sparr
69e4f16b13 Fix bad http GET parameter encoding, add unit test 2016-02-23 10:07:56 -07:00
Cameron Sparr
918c3fb260 httpjson test real response from issue #729 2016-02-23 09:34:01 -07:00
Cameron Sparr
54ee44839c Put arm deb and rpm downloads on readme 2016-02-22 16:59:45 -07:00
Cameron Sparr
8362aa9d66 Some windows build script fixes 2016-02-22 15:12:35 -07:00
Cameron Sparr
2a6ff16819 Fix up config panic points for naoina/toml support
closes #736
2016-02-22 14:44:33 -07:00
Pierre Fersing
47ad73cc89 Ignore boring filesystems from disk plugin
Modern Linux has a lot of boring filesystems (tmpfs on /dev, devpts on
/dev/pts, lots of cgroup mounts on /sys/fs/cgroup/*, ...).

* Ignore filesystems with 0 bytes (this covers cgroup, devpts and others).
* Add IgnoreFS to ignore additional filesystems by their type. Add tmpfs and
  devtmpfs as default ignored types.
2016-02-22 14:34:26 -07:00
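A sketch of the described filtering with assumed stand-in types (the real plugin works from the OS partition and usage data):

```go
package main

import "fmt"

// partition is a simplified stand-in for the data the disk plugin inspects.
type partition struct {
	Mountpoint string
	Fstype     string
	TotalBytes uint64
}

// keep reports whether a partition should be reported: zero-byte filesystems
// (cgroup, devpts, ...) and any type listed in ignoreFS are skipped.
func keep(p partition, ignoreFS map[string]bool) bool {
	if p.TotalBytes == 0 {
		return false
	}
	return !ignoreFS[p.Fstype]
}

func main() {
	ignore := map[string]bool{"tmpfs": true, "devtmpfs": true} // default ignored types
	fmt.Println(keep(partition{"/", "ext4", 50 << 30}, ignore))       // true
	fmt.Println(keep(partition{"/dev", "devtmpfs", 8 << 30}, ignore)) // false
}
```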
Aurélien DEHAY
9687f71a17 README updated for pgrep user support
closes #724
2016-02-22 14:33:37 -07:00
Aurélien DEHAY
ed684be18d Adding pgrep user support 2016-02-22 14:32:04 -07:00
Cameron Sparr
5aef725c13 Change pass/drop to namepass/namedrop for outputs
closes #730
2016-02-22 13:35:06 -07:00
Thibault Cohen
d00550c45f Add metric pass/drop filter 2016-02-22 12:11:33 -07:00
Cameron Sparr
9ce8d78835 Set running output quiet mode in agent connect func
closes #701
2016-02-22 11:42:02 -07:00
Cameron Sparr
29016822fd Sensors input currently only available if built from source 2016-02-21 16:35:56 -07:00
Marcin Jasion
bb50d7edb4 dns_query plugin fixups:
- renamed plugin to dns_query
- domains are optional
- new record types

closes #694
2016-02-21 16:33:04 -07:00
Marcin Jasion
d43d6f2b13 renamed plugin to dns_query and value to query_time_ms
small polishings

added more record types - AAAA and ANY
2016-02-21 16:21:11 -07:00
Marcin Jasion
636dc27ead Dns query input plugin 2016-02-21 16:21:11 -07:00
Cameron Sparr
a18f535f21 Circle script: unset GOGC so it uses default 2016-02-21 16:00:41 -07:00
Cameron Sparr
6994d4a712 Turn GOGC on for packaging, use go 1.5.3 2016-02-21 10:41:46 -07:00
Cameron Sparr
c9d0ae7cf3 Circle script: create packages if commit is tagged 2016-02-20 12:47:31 -07:00
Jason Coene
9edc25999e Minor formatting improvements
closes #727
2016-02-19 16:18:06 -07:00
Jason Coene
53c130b704 Add riak plugin 2016-02-19 16:16:50 -07:00
Cameron Sparr
e4e174981d Skip snmp tests that require docker in short mode 2016-02-19 16:15:14 -07:00
Cameron Sparr
584a52ac21 InfluxDB output should not default to 'no timeout' for http writes
default to 5s instead, since even if it times out we will cache the
points and move on

closes #685
2016-02-19 15:38:51 -07:00
Cameron Sparr
f9b5767dae Provide default args: percpu=true and totalcpu=true for cpu plugin
Also if outputs.file is empty, write to stdout

closes #720
2016-02-19 11:56:33 -07:00
Cameron Sparr
3179829fa5 Update changelog for 0.10.3 2016-02-18 17:18:43 -07:00
Cameron Sparr
187d1b853d Update Makefile to 'go install' rather than 'go build' 2016-02-18 16:48:59 -07:00
Cameron Sparr
8d2e5f0bda Seems to be a toml parse bug around triple pounds 2016-02-18 14:36:03 -07:00
Cameron Sparr
7def6663bd Root directory cleanup 2016-02-18 13:37:36 -07:00
Dragostin Yanev (netixen)
a13d19c582 plugins/outputs/influxdb: Prevent runtime panic.
- Check and return error from NewBatchPoints to prevent runtime panic if
   user provides an unparsable precision time unit in config.
- Provide correct sample config precision examples.
- Update etc/telegraf.conf precision comment.

closes #715
2016-02-18 13:12:20 -07:00
Gabriel Levine
1837f83282 cleaned up the httpjson POST function.
closes #688
closes #394
2016-02-18 10:11:56 -07:00
Cameron Sparr
b14cfd6c64 Add Configuration to statsd input readme
closes #714
2016-02-18 10:09:57 -07:00
Sergio Jimenez
963c51f473 fix(config): Made sample config consistent.
closes #682
2016-02-18 10:01:03 -07:00
Sergio Jimenez
1f77b75e14 fix(sample): Made TOML parser happy again 2016-02-18 09:00:27 +01:00
Sergio Jimenez
e5f3acd139 doc(readme): Added README.md. 2016-02-18 09:00:27 +01:00
Sergio Jimenez
c8365b3b7e test(unit): Removed useless tests 2016-02-18 09:00:27 +01:00
Sergio Jimenez
29c671ce46 fix(mesos): TOML annotation
* It was still using the previous config name
2016-02-18 09:00:27 +01:00
Sergio Jimenez
38ac9d2ecf List mesos in main README
And on the test configuration file
2016-02-18 09:00:27 +01:00
Sergio Jimenez
3573d93855 fix(vet): Range var used by goroutine
* Use it as a parameter for the closure
2016-02-18 09:00:27 +01:00
Sergio Jimenez
3cc2cda026 refactor(naming): For master specific settings
* This should help backwards compatibility when adding more features or
  supported Mesos components
2016-02-18 09:00:27 +01:00
Sergio Jimenez
7d10986f10 test(unit): Test for whitelisted metrics 2016-02-18 09:00:27 +01:00
Sergio Jimenez
8c6a6604ce Comments and cleanup 2016-02-18 09:00:27 +01:00
Sergio Jimenez
7170280401 fix(import): Json parser lives outside internal
* Fixed import for JSONFlattener{}; it's now in parsers, broke after
  rebasing. 2016-02-18 09:00:27 +01:00
2016-02-18 09:00:27 +01:00
Sergio Jimenez
babecb6d49 feat(timeout): Use timeout setting
* Use timeout as parameter in the http request
* A bit of cleanup
* More tests
2016-02-18 09:00:27 +01:00
Sergio Jimenez
9770802901 feat(whitelist): Converted black to whitelist
* Defined global var for holding default metric groups
* Refactor removeGroup() to work with the whitelist
* Refactor TestRemoveGroup()
2016-02-18 09:00:27 +01:00
Sergio Jimenez
4c1e817b38 fix(indent): For configuration sample 2016-02-18 09:00:27 +01:00
Sergio Jimenez
52b329be4e plugin(mesos): Reversed removeGroup()
* Now the user selects what to push instead of what not
* Required to check and improve tests
* Missing checks in the code when MetricsCol is empty
2016-02-18 09:00:27 +01:00
Sergio Jimenez
1d50d62a79 plugin(mesos): Added goroutines.
The plugin will iterate over the Servers slice and create a goroutine
for each of them.
2016-02-18 09:00:27 +01:00
Sergio Jimenez
07502c9804 Don't add port to tags just the host 2016-02-18 09:00:27 +01:00
Sergio Jimenez
59e0e49822 Indentation for sample config string 2016-02-18 09:00:27 +01:00
Sergio Jimenez
05170d78be plugin(mesos): Initial commit
The plugin is able to query a Mesos master and push the metrics; a
blacklist and a timeout can be configured, though the timeout is still not used.

Added unit test, might be a good idea to have system test using docker.
2016-02-18 09:00:27 +01:00
Cameron Sparr
88c83277c6 Write unit tests for RunningOutput 2016-02-17 17:06:34 -07:00
Cameron Sparr
d0734b105b Start service plugins immediately, fix off-by-one bug 2016-02-17 15:10:32 -07:00
Cameron Sparr
4860dc148c changelog update 2016-02-17 09:53:41 -07:00
Cameron Sparr
ee468be696 Flush based on buffer size rather than time
this includes:
- Add Accumulator to the Start() function of service inputs
- For message consumer plugins, use the Accumulator to constantly add
  metrics and make Gather a dummy function
- rework unit tests to match this new behavior.
- make "flush_buffer_when_full" a config option that defaults to true

closes #666
2016-02-16 22:25:22 -07:00
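A simplified sketch, with assumed names rather than the RunningOutput implementation, of flushing when the buffer fills instead of waiting for the flush interval:

```go
package main

import "fmt"

type metric string

type runningOutput struct {
	buffer    []metric
	limit     int  // buffer size that triggers a flush
	flushFull bool // "flush_buffer_when_full", defaulting to true
	flushed   int
}

// AddMetric buffers a metric and flushes immediately once the buffer is full.
func (ro *runningOutput) AddMetric(m metric) {
	ro.buffer = append(ro.buffer, m)
	if ro.flushFull && len(ro.buffer) >= ro.limit {
		ro.Flush()
	}
}

func (ro *runningOutput) Flush() {
	ro.flushed += len(ro.buffer) // stand-in for writing to the output
	ro.buffer = ro.buffer[:0]
}

func main() {
	ro := &runningOutput{limit: 3, flushFull: true}
	for i := 0; i < 7; i++ {
		ro.AddMetric(metric(fmt.Sprintf("m%d", i)))
	}
	fmt.Println("flushed:", ro.flushed, "still buffered:", len(ro.buffer))
}
```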
Cameron Sparr
7f539c951a changelog update 2016-02-15 16:08:45 -07:00
Thibault Cohen
e495ae9030 Add tcp/udp check connection input plugin
closes #650
2016-02-15 13:38:58 -07:00
Cameron Sparr
ccb6b3c64b Small readme formattings 2016-02-14 18:44:48 -07:00
Anton Bykov
85594cc92e Readme: specify compression format for unpacking
closes #693
2016-02-14 15:59:53 -07:00
Andrei Burd
0b72612cd2 Code formatted, Readme updated based on example
closes #695
2016-02-14 15:58:05 -07:00
Vladislav Shub
dd086c7830 Added full support for raindrops and tests 2016-02-14 18:52:26 +02:00
Cameron Sparr
6a601ceb97 Add support for specifying SSL config for influxdb output
closes #191
2016-02-12 17:02:01 -07:00
Cameron Sparr
8236534e3c changelog update 2016-02-12 16:55:27 -07:00
Cameron Sparr
0fef147713 data output readme update 2016-02-12 16:52:33 -07:00
Cameron Sparr
0198296ced Data format output documentation 2016-02-12 16:47:07 -07:00
Cameron Sparr
37726a02af Add Serializer plugins, and 'file' output plugin 2016-02-12 15:05:27 -07:00
Cameron Sparr
a9c135488e Add Serializer plugins, and 'file' output plugin 2016-02-12 14:13:49 -07:00
Thomas Menard
72f5c9b62d postgres plugin bgwriter stats
Add pg_stat_bg_writer stats

closes #683
2016-02-12 11:21:53 -07:00
Cameron Sparr
8d0f50a6fd MQTT Consumer Input plugin 2016-02-12 11:13:32 -07:00
Dragostin Yanev (netixen)
6c353e8b8f Change point_buffer to metric_buffer to conform with changes in https://github.com/influxdata/telegraf/pull/676
closes #680
2016-02-12 10:01:56 -07:00
Dragostin Yanev (netixen)
512d9822f0 Add NATS consumer input plugin. 2016-02-12 09:58:32 -07:00
Cameron Sparr
d003ca46c7 Merge pull request #673 from miketonks/f-docker-percentages
Add calculated cpu and memory percentages to docker input (via config option)
2016-02-11 08:43:55 -07:00
Mike Tonks
7587dc350e Remove config option, percent option always activated. Fix review issues 2016-02-11 10:49:48 +00:00
Cameron Sparr
28664fedb2 Support exec input plugin legacy behavior 2016-02-10 13:26:02 -07:00
Marcus Geiger
ef20f05221 Add --pkgarch option to build.py to specify the packaging architecture
The pkg arch can be different from GOARCH.

Example: build for debian on raspberry pi. GOARCH will be arm
but the packaging architecture on debian will be armhf (arm
hard float). The --pkgarch option is passed to fpm to specify
the required architecture which is reflected in the package
manifest and also in the result filename.

closes #675
2016-02-09 17:41:48 -07:00
Miki
cabf5d004d added dovecot plugin
closes #671
2016-02-09 14:10:17 -07:00
Cameron Sparr
d551da26e5 Fix exec input legacy behavior, command='' 2016-02-09 13:49:14 -07:00
Dhruv Bansal
893357f01e Updated Riemann output:
* Customizable 'separator' option instead of hard-coded '_'

* String values are sent as "State" instead of "Metric", preventing
  Riemann from rejecting them

* Riemann service name is set to an (ugly) combination of input name &
  (sorted) tags' values...this allows connecting different events for
  the same input together on the Riemann side

closes #642
2016-02-09 11:17:07 -07:00
Cameron Sparr
fc7fa4b6c5 Cleanup comments and indentation in config file 2016-02-09 11:01:50 -07:00
Cameron Sparr
fb75db2f1f re-arrange and cleanup graphite output test 2016-02-09 11:01:13 -07:00
Mike Tonks
7c20522a30 Add calculated cpu and memory percentages to docker input (via config option) 2016-02-09 15:20:56 +00:00
Cameron Sparr
c09884c686 Fixup some URL typos 2016-02-08 21:36:53 -07:00
Cameron Sparr
9273782093 changelog update 2016-02-08 21:34:22 -07:00
Cameron Sparr
44ffe29c10 Update Godeps and Godeps_windows files 2016-02-08 21:26:56 -07:00
Cameron Sparr
e619493ece Implementing generic parser plugins and documentation
This constitutes a large change in how we will parse different data
formats going forward (for the plugins that support it)

This is working off @henrypfhu's changes.
2016-02-08 21:08:44 -07:00
Henry Hu
1449c8b887 Add Graphite line protocol parsing to exec plugin
closes #637
2016-02-08 17:12:28 -07:00
Cameron Sparr
6b06a23102 Change [tags] to [global_tags] to deal with toml bug
closes #662
2016-02-08 16:20:47 -07:00
Cameron Sparr
b55a93a3e1 update changelog 2016-02-07 09:06:51 -07:00
Cameron Sparr
f5f43e6d1b ping plugin: use -W for linux, -t for bsd/darwin
closes #443
2016-02-06 23:24:47 -07:00
Cameron Sparr
1e03a9440b Try ping plugin with -n and -s options added 2016-02-06 23:09:29 -07:00
codehate
9a59512f75 Add: Telegraf CouchDB Plugin
CouchDB Plugin - Formatted Code

closes #652

Minor fix for CouchDB Plugin

Formatted code fix for CouchDB Plugin

CouchDB Plugin - Changed hosts to full urls

CouchDB Plugin - Formatted Code

CouchDB Plugin - Fatal commit from local fix

CouchDB Plugin - Updated test case
2016-02-05 14:14:19 -07:00
Thibault Cohen
35150caea4 Add a make command with CGO disabled
closes #458
2016-02-04 17:33:40 -07:00
Cameron Sparr
f01da8fee4 Remove extraneous 'v' from README tarball 2016-02-04 11:23:46 -07:00
Cameron Sparr
434c08a357 Release 0.10.2 2016-02-04 11:04:29 -07:00
Cameron Sparr
bd9c5b6995 mqtt output: cleanup, implement TLS
Also normalize TLS config across all output plugins and normalize
comment strings as well.
2016-02-04 10:44:37 -07:00
Cameron Sparr
b941d270ce changelog update 2016-02-03 08:35:03 -07:00
Reginaldo Sousa
9406961125 Fix a bug when setting host header in httpjson
closes #634
2016-02-02 21:59:18 -07:00
Rune Darrud
0d391b66a3 Added support for Windows operating systems pre-Vista. 2016-02-02 21:57:38 -07:00
Cameron Sparr
a11e07e250 Minor change to forgotten config file exit 2016-02-01 17:44:19 -07:00
Cameron Sparr
d266dad1f4 Don't compile ping plugin on windows.
closes #496
2016-02-01 16:39:53 -07:00
Rune Darrud
331b700d1b Corrected an issue that came from earlier code cleanup,
wherein missing performance counters caused it to return
early from the loop instead of ignoring missing counters in
default configuration mode.

closes #625
2016-01-31 23:17:45 -07:00
Christoph Wegener
2163fde0a4 Fix memory leak: Remove signal.Notify code from plugins/inputs/win_perf_counters.(*Win_PerfCounters).Gather 2016-01-31 23:16:09 -07:00
Cameron Sparr
24a2aaef4b Ansible role in readme 2016-01-30 11:55:48 -07:00
Cameron Sparr
042cf517b2 Mention yum/apt repo in README
Also add `make windows-build` to Makefile

closes #618
2016-01-30 11:35:39 -07:00
Cameron Sparr
b97027ac9a Allow exec plugin to parse line-protocol
closes #613
2016-01-30 11:12:59 -07:00
Christoph Wegener
4ea3f82e50 Replace all single percentage characters with double
percentage characters in sampleConfig string so that fmt.Printf
will interpret them as literal percentage characters when
running 'telegraf.exe -sample-config'

closes #620
2016-01-30 10:10:55 -07:00
Cameron Sparr
38c4111e6c Add unit tests for the root telegraf package 2016-01-29 16:01:34 -07:00
Cameron Sparr
338341add8 Put windows dependencies into a separate Godeps file 2016-01-29 11:10:18 -07:00
Cameron Sparr
93bb679f9d Fix possible panic if stat is nil
closes #612
2016-01-29 10:47:30 -07:00
Pavel Yudin
40d859354f Add powerdns input plugin
closes #614
2016-01-29 09:40:04 -07:00
Cameron Sparr
9e7c8df384 statsd: allow template parsing fields. Default to value=
closes #602
2016-01-28 16:56:50 -07:00
Rune Darrud
f088dd7e00 Added plugin to read Windows performance counters
closes #575
2016-01-28 16:35:13 -07:00
Cameron Sparr
10c4e4f63f Fix datadog json marshalling
fixes #607
2016-01-28 16:12:33 -07:00
Cameron Sparr
962325cc40 Warn when metrics are being overwritten
closes #601
2016-01-28 14:00:14 -07:00
root
a9c33abfa5 sql server: update README.md
closes #594
2016-01-28 13:50:26 -07:00
Cameron Sparr
d835c19fce Insert . between msrmnt and field name in datadog output
fixes #600
2016-01-28 12:04:26 -07:00
Marcin Bunsch
1f1384afc6 Use a single measurement with fields for timings in statsd plugin.
closes #603
2016-01-28 12:03:48 -07:00
Cameron Sparr
9d4b55be19 Include all tag values in graphite output
closes #595
2016-01-28 10:58:35 -07:00
Cameron Sparr
c549ab907a Throughout telegraf, use telegraf.Metric rather than client.Point
closes #599
2016-01-27 23:47:32 -07:00
Cameron Sparr
9c0d14bb60 Create public models for telegraf metrics, accumulator, plugins
This will basically make the root directory a place for storing the
major telegraf interfaces, which will make telegraf's godoc look quite
a bit nicer. And make it easier for contributors to look up the few data
types that they actually care about.

closes #564
2016-01-27 15:42:50 -07:00
Cameron Sparr
a822d942cd 386 -> i386 2016-01-27 13:42:34 -07:00
Cameron Sparr
3a64a01f91 Release 0.10.1 2016-01-27 12:55:06 -07:00
Cameron Sparr
6ebb6bc7ee Fix SNMP unit tests on OSX, improve tag config doc
closes #592
2016-01-27 11:27:10 -07:00
Cameron Sparr
be95dfdd0e Update dependency hashes 2016-01-26 16:27:18 -07:00
Cameron Sparr
88890fa7c2 Update changelog 2016-01-26 16:05:20 -07:00
Andrea Sosso
f8930b9cbc Additional request header parameters for httpjson plugin
closes #471
2016-01-26 16:02:47 -07:00
Cameron Sparr
c10227a766 Update changelog and readme, and small tweaks to github_webhooks 2016-01-26 15:57:06 -07:00
Cameron Sparr
7e7e462de1 Merge branch 'ghWebhooks'
closes #573
2016-01-26 15:29:41 -07:00
root
a93e1ceac8 Add sqlserver input plugin
closes #589
2016-01-26 14:55:27 -07:00
Cameron Sparr
7f8469b66a Fixup some disk usage reporting, make it reflect df
fixes #513
2016-01-26 11:56:28 -07:00
Wu Taizeng
cf568487c8 Fix panics in some inputs that would lead to telegraf exiting
closes #585
closes #584
2016-01-26 10:48:24 -07:00
Jack Zampolin
4c74a2dd3a Fix naming issue 2016-01-25 17:34:44 -08:00
Jack Zampolin
a70452219b Remove internal dependency 2016-01-25 17:28:28 -08:00
Ross McDonald
47ea2d5fb4 Added Amazon Linux logic to post-installation script.
closes #579
2016-01-25 17:49:12 -07:00
Ross McDonald
16540e35f1 Backporting fixes from the influxdb build script, along with a few improvements:
- Added iteration to tar/zip output name (in case of pre-releases)
- Switched 32-bit signifier to i386 from 386
- Tweaked upload settings to allow for folder paths in bucket names
2016-01-25 15:13:55 -07:00
Cameron Sparr
3bfb3a9fe2 Insert documentation into sample-config on JSON parsing
closes #521
2016-01-25 13:29:05 -07:00
Lukasz Jagiello
f9517dcf24 RabbitMQ plugin - extra fields:
Extra fields describing size of all message bodies in the queue.

* message_bytes
* message_bytes_ready
* message_bytes_unacknowledged
* message_bytes_ram
* message_bytes_persistent

More information about each field:
https://www.rabbitmq.com/man/rabbitmqctl.1.man.html

closes #577
2016-01-25 13:00:54 -07:00
Jack Zampolin
7878b22b09 Add README.md 2016-01-25 11:42:03 -08:00
Thibault Cohen
e6d7e4e309 Add snmp input plugin
closes #546
closes #40
2016-01-25 12:35:27 -07:00
Tim Raymond
40d0da404e Change configuration package to influxdata/config
We are unifying the way that we handle configuration across the products
into the influxdata/config package. This provides the same API as
naoina/toml that was used previously, but provides some additional
features such as support for documenting generated TOML configs as well
as support for handling default options. This replaces all usage of
naoina/toml with influxdata/config.
2016-01-25 12:02:44 -07:00
Cameron Sparr
8675bd125a add 'gdm restore' to adding a dependency instructions 2016-01-25 10:59:44 -07:00
Jack Zampolin
4e5dfa5d33 Address PR comments and merge conflicts 2016-01-25 09:56:57 -08:00
Jack Zampolin
89f5b77550 Fix merge conflict in all.go 2016-01-22 16:51:54 -08:00
Jack Zampolin
5b15cd9163 Change github.com/influxdata to github.com/influxdb where necessary 2016-01-22 16:48:42 -08:00
Jack Zampolin
dbf1383a38 Change github.com/influxdata to github.com/influxdata 2016-01-22 16:45:31 -08:00
Jack Zampolin
46b367e74b Add tests 2016-01-22 16:43:33 -08:00
Cameron Sparr
3da390682d Kinesis output shouldn't return an error for no reason 2016-01-22 17:32:36 -07:00
Cameron Sparr
5349a3b6d1 Implement a per-output fixed size metric buffer
Also moved some objects out of config.go and put them in their own
package, internal/models

fixes #568
closes #285
2016-01-22 16:29:02 -07:00
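The commit above introduces a per-output, fixed-size metric buffer. A minimal sketch of the idea, assuming a drop-oldest policy once the limit is reached; strings stand in for telegraf.Metric and the names are illustrative, not the code in internal/models:

```go
package main

import "fmt"

// Buffer holds up to `limit` metrics; when full, the oldest entries are
// dropped to make room for new ones.
type Buffer struct {
	metrics []string // stand-in for telegraf.Metric
	limit   int
}

func NewBuffer(limit int) *Buffer {
	return &Buffer{limit: limit}
}

// Add appends metrics, discarding the oldest ones once the limit is exceeded.
func (b *Buffer) Add(ms ...string) {
	b.metrics = append(b.metrics, ms...)
	if over := len(b.metrics) - b.limit; over > 0 {
		b.metrics = b.metrics[over:] // drop the oldest
	}
}

// Drain returns everything currently buffered and empties the buffer,
// e.g. after a successful output flush.
func (b *Buffer) Drain() []string {
	out := b.metrics
	b.metrics = nil
	return out
}

func main() {
	b := NewBuffer(3)
	b.Add("m1", "m2", "m3", "m4")
	fmt.Println(b.Drain()) // [m2 m3 m4]
}
```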
Cameron Sparr
f2ab5f61f5 Gather elasticsearch nodes in goroutines, handle errors
fixes #464
2016-01-21 17:00:44 -07:00
Cameron Sparr
e910a03af4 Changelog update 2016-01-21 16:44:35 -07:00
Cameron Sparr
4d0dc8b7c8 Refactor the docker plugin, use go-dockerclient throughout
fixes #503
fixes #463
2016-01-21 16:07:03 -07:00
Stephen Kwong
e0dc1ef5bd Add Cloudwatch output
closes #553
2016-01-21 09:11:52 -07:00
Cameron Sparr
f24f5e98dd Remove go get ./... from the Makefile 2016-01-20 15:01:08 -07:00
Cameron Sparr
6647cfc228 statsd: If parsing a value to int fails, try to float and cast to int
fixes #556
2016-01-20 14:30:57 -07:00
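A minimal sketch of the fallback described above: parse the value as an integer first, and only if that fails parse it as a float and truncate to int64:

```go
package main

import (
	"fmt"
	"strconv"
)

// parseInt tries integer parsing first, then falls back to float parsing
// and truncates, as described in the commit above.
func parseInt(s string) (int64, error) {
	if v, err := strconv.ParseInt(s, 10, 64); err == nil {
		return v, nil
	}
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return 0, fmt.Errorf("unable to parse %q as an integer: %v", s, err)
	}
	return int64(f), nil
}

func main() {
	v, _ := parseInt("12.7")
	fmt.Println(v) // 12
}
```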
Jack Zampolin
ddcd99a1ce Push ghwebhooks branch 2016-01-20 12:19:03 -08:00
Cameron Sparr
55c07f23b0 Update contributing document 2016-01-20 12:33:49 -07:00
Cameron Sparr
8192572e23 Update changelog 2016-01-20 12:22:11 -07:00
Jack Zampolin
0cdf1b07e9 Fix issue 524 2016-01-20 10:57:35 -08:00
Jack Zampolin
8653bae6ac Change start implementation 2016-01-20 10:49:42 -08:00
Cameron Sparr
fc1aa7d3b4 Filter mount points before stats are collected
fixes #440
2016-01-20 11:46:59 -07:00
Jack Zampolin
8bdcd6d576 First commit for ghwebhooks service plugin 2016-01-19 23:14:11 -08:00
Cameron Sparr
d3925fe578 Include CPU usage percent with procstat data
closes #484
2016-01-19 22:52:55 -07:00
Cameron Sparr
d3a5cca1bc Collection interval random jittering
closes #460
2016-01-19 13:22:54 -07:00
Cameron Sparr
f3b553712a Changelog update 2016-01-19 11:21:05 -07:00
Cameron Sparr
839651fadb Change default statsd packet size to 1500, make configurable
Also modifying the internal UDP listener/parser code to make it able to
handle higher load. The udp listener will no longer do any parsing or
string conversion. It will simply read UDP packets as bytes and put them
into a channel. The parser thread will now deal with splitting the UDP
metrics into separate strings.

This could probably be made even better by leaving everything as byte
arrays.

fixes #543
2016-01-19 11:08:16 -07:00
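The commit above splits the statsd UDP listener from the parser: the listener only reads raw packets into a channel, and a separate goroutine does the string work. A minimal sketch of that pattern, with an illustrative port and packet size:

```go
package main

import (
	"bytes"
	"fmt"
	"log"
	"net"
)

const maxPacketSize = 1500 // illustrative; matches the new default above

// listen reads raw UDP payloads and hands them to the packets channel
// without doing any parsing or string conversion.
func listen(conn *net.UDPConn, packets chan<- []byte) {
	buf := make([]byte, maxPacketSize)
	for {
		n, _, err := conn.ReadFromUDP(buf)
		if err != nil {
			close(packets)
			return
		}
		pkt := make([]byte, n)
		copy(pkt, buf[:n]) // copy so the read buffer can be reused
		packets <- pkt
	}
}

// parse does the string work in its own goroutine: one UDP packet may carry
// several newline-separated metric lines.
func parse(packets <-chan []byte) {
	for pkt := range packets {
		for _, line := range bytes.Split(pkt, []byte("\n")) {
			if len(line) == 0 {
				continue
			}
			fmt.Printf("metric line: %s\n", line)
		}
	}
}

func main() {
	addr, _ := net.ResolveUDPAddr("udp", ":8125")
	conn, err := net.ListenUDP("udp", addr)
	if err != nil {
		log.Fatal(err)
	}
	packets := make(chan []byte, 1000)
	go listen(conn, packets)
	parse(packets)
}
```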
Thibault Cohen
6a50fceea4 Replace plugins by inputs in some strings
closes #542
2016-01-19 10:09:37 -07:00
Cameron Sparr
7efe108686 Update Godeps file 2016-01-19 09:44:25 -07:00
Hannu Valtonen
c313af1b24 kafka: Add support for using TLS authentication for the kafka output
With the advent of Kafka 0.9.0+ it is possible to set up TLS client
certificate-based authentication to limit access to Kafka.

Four new configuration variables are specified for setting up the
authentication. If they're not set, the behavior stays the same as
before the change.

closes #541
2016-01-18 11:17:01 -07:00
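The commit above adds TLS client-certificate authentication to the kafka output. A sketch of how a Go client typically assembles a tls.Config from a CA file plus a client certificate/key pair; the file paths and function name are illustrative stand-ins for the new config variables, not the plugin's actual code:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"log"
)

// tlsConfig builds a TLS configuration with a custom CA and a client
// certificate, which is the usual way to enable client-cert auth in Go.
func tlsConfig(caFile, certFile, keyFile string) (*tls.Config, error) {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	caPEM, err := ioutil.ReadFile(caFile)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		return nil, fmt.Errorf("could not parse CA certificate %s", caFile)
	}
	return &tls.Config{
		Certificates: []tls.Certificate{cert},
		RootCAs:      pool,
	}, nil
}

func main() {
	cfg, err := tlsConfig("ca.pem", "client-cert.pem", "client-key.pem")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("TLS configured, certificates loaded:", len(cfg.Certificates))
}
```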
Vinh
1388b1b58b Add phusion Passenger plugin
Gather metrics by parsing the XML output of the `passenger-status` utility.
More information of this utility:
https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html

closes #522
2016-01-18 11:14:04 -07:00
Thibault Cohen
551db20657 Add SIGHUP support to reload telegraf config
closes #539
2016-01-18 11:07:07 -07:00
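A minimal sketch of SIGHUP-driven reloading as described above, using os/signal; reloadConfig is a hypothetical stand-in for whatever actually re-reads the config:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

// reloadConfig is a placeholder for the real config reload.
func reloadConfig() {
	fmt.Println("reloading configuration...")
}

func main() {
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)

	for sig := range signals {
		if sig == syscall.SIGHUP {
			reloadConfig()
			continue // keep running after a reload
		}
		fmt.Println("shutting down on", sig)
		return
	}
}
```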
Cameron Sparr
bc71e956a5 changelog bugfix update 2016-01-18 11:05:14 -07:00
Vinh
5af6974796 phpfpm plugin: enhance socket gathering and config
- If we detect errors when gathering stats via socket, return those errors
  so they appear in the Telegraf log
- Improve the fcgi client, also upgrading it to the current version of Go at
  https://golang.org/src/net/http/fcgi/fcgi.go
- Add tests for unix socket and fcgi
- Allow customization of the fpm status path
- Document that `host` isn't used in the `unixsocket` case to connect
  remotely, but only as an extra url field
- Document the upgrade for the new data layout

closes #499
closes #502
closes #538
2016-01-18 10:56:45 -07:00
Eugene Dementiev
a712036b56 core: print error on output connect fail
closes #537
2016-01-16 17:32:23 -07:00
Eugene Dementiev
37b96c192b output amqp: Add ssl support
closes #536
2016-01-16 17:31:05 -07:00
Cameron Sparr
8cbdf0f907 Tweak config messages for graphite. Update changelog and readme
closes #494
2016-01-16 17:29:02 -07:00
Thibault Cohen
ef5c630d3a Add Graphite output 2016-01-16 17:19:27 -07:00
Cameron Sparr
6eea89f4c0 Make NSQ plugin compatible with version 0.10.0 2016-01-15 17:04:23 -07:00
Jeff Nickoloff
dbbb2d9877 NSQ Plugin
- Polls a set of NSQD REST endpoints and collects counters for all topics, channels, and clients

Signed-off-by: Jeff Nickoloff <jeff@allingeek.com>

closes #492
2016-01-15 16:09:31 -07:00
Cameron Sparr
c483e16d72 Add option to disable statsd name conversion
closes #467
closes #532
2016-01-15 15:58:09 -07:00
Cameron Sparr
40a5bad968 Update procstat doc 2016-01-15 15:55:52 -07:00
Kostas Botsas
1421bce371 Update README.md 2016-01-15 14:49:53 -08:00
Kostas Botsas
5e7dd6d51b Merge pull request #533 from influxdata/fix-interval-option-v0.10
interval options should have string value
2016-01-15 14:49:11 -08:00
Kostas Botsas
71f4e72b22 interval options should have string value
also mention name_override and name_prefix on top of name_suffix
2016-01-15 14:48:45 -08:00
Cameron Sparr
b24e71b232 Removing old package script, trim Makefile 2016-01-15 14:03:04 -07:00
Cameron Sparr
f60c090e4c Add a quiet mode to telegraf
closes #514
2016-01-15 13:31:04 -07:00
Cameron Sparr
50334e6bac Only compile the sensors plugin if the 'sensors' tag is set 2016-01-15 13:15:33 -07:00
Cameron Sparr
963a9429dd Tweak changelog for sensors plugin, and add a non-linux build file
closes #519
closes #168
2016-01-15 11:22:33 -07:00
Matt Davis
2eda8d64c7 Added info to readme and changelog 2016-01-15 11:17:46 -07:00
Matt Davis
9b96c62e46 Change build configuration to linux only 2016-01-15 11:17:46 -07:00
Matt Davis
378b7467a4 Fixed an unused variable 2016-01-15 11:17:46 -07:00
Matt Davis
c0d98ecd4b Added initial support for gosensors module 2016-01-15 11:17:46 -07:00
Thibault Cohen
b44644b6bf Add response time to httpjson plugin
closes #475
2016-01-15 11:13:12 -07:00
Ross McDonald
7bfb42946e Switched to /etc/debian_version for Debian/Ubuntu distribution recognition in post-install.
closes #526
closes #525
2016-01-15 10:54:47 -07:00
Cameron Sparr
e8907acd28 Update Godeps and fix changelog 2014->2016 2016-01-14 22:35:05 -07:00
Kevin Fitzpatrick
d6ef3b1e02 Note on where to look for plugin information 2016-01-14 21:35:10 -07:00
Cameron Sparr
a39a7a7a03 Add an interface:"all" tag to the net protocol counters
fixes #508
2016-01-14 19:55:19 -07:00
Kostas Botsas
923be102b3 Align exec documentation with v0.10 updates 2016-01-14 15:55:53 -08:00
Hannu Valtonen
7531e218c1 build.py: Make build script work on both Python2.x and Python3.x
While at it also add a missing dependency on lsof required by the
netstat plugin.

closes #512
2016-01-14 10:28:50 -08:00
Thibault Cohen
3cc1fecb53 Ping input doesn't return the response time metric on timeout
closes #506
2016-01-14 10:12:10 -08:00
Philip Silva
3c89847489 internal: FlattenJSON, flatten arrays as well
With HTTP JSON or Elasticsearch, one can also process values nested in arrays.
2016-01-14 09:52:58 -08:00
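The commit above extends JSON flattening to descend into arrays as well as objects. A minimal sketch of that behavior, using the array index as part of the flattened key; the separator and naming are illustrative, not necessarily what internal.FlattenJSON produces:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// flatten walks objects and arrays, collecting numeric leaves into out with
// underscore-joined key paths; array elements use their index as the key.
func flatten(prefix string, v interface{}, out map[string]float64) {
	switch t := v.(type) {
	case map[string]interface{}:
		for k, child := range t {
			flatten(join(prefix, k), child, out)
		}
	case []interface{}:
		for i, child := range t {
			flatten(join(prefix, strconv.Itoa(i)), child, out)
		}
	case float64: // encoding/json decodes all JSON numbers as float64
		out[prefix] = t
	}
}

func join(prefix, key string) string {
	if prefix == "" {
		return key
	}
	return prefix + "_" + key
}

func main() {
	var data interface{}
	raw := `{"shards": {"total": 2, "docs": [{"count": 5}, {"count": 7}]}}`
	if err := json.Unmarshal([]byte(raw), &data); err != nil {
		panic(err)
	}
	fields := map[string]float64{}
	flatten("", data, fields)
	fmt.Println(fields) // map[shards_docs_0_count:5 shards_docs_1_count:7 shards_total:2]
}
```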
Cameron Sparr
fb837ca66d Add 0.10.0 blog post link to README 2016-01-14 09:21:01 -08:00
Cameron Sparr
2ec1ffdc11 Fix Telegraf s3 upload and readme links
fixes #505
2016-01-11 13:35:26 -07:00
Cameron Sparr
56509a61b9 Change 0.3.0 -> 0.10.0 2016-01-08 17:22:23 -07:00
Cameron Sparr
f37f8ac815 Update changelog and readme for package updates 2016-01-08 15:13:58 -07:00
Ross McDonald
231b5feb23 Merge pull request #497 from influxdata/rm-package-updates
Packaging Updates
2016-01-08 15:54:09 -06:00
Ross McDonald
81fa063338 Removed data directory entries, since Telegraf doesn't need them. 2016-01-08 15:34:11 -06:00
Ross McDonald
07b4a4dbca Added a build.py script for compiling and packaging. Added post and pre install scripts to handle installation and upgrades in a cleaner way. Minor fixes to the init script and service unit file. 2016-01-08 15:28:33 -06:00
Cameron Sparr
fd6daaa73b 0.3.0: update README and documentation 2016-01-08 15:25:04 -06:00
Cameron Sparr
6496d185ab add backwards-compatibility for 'plugins', remove [inputs] and [outputs] headers 2016-01-08 12:49:50 -07:00
Cameron Sparr
7499c1f969 0.3.0: update README and documentation 2016-01-08 10:33:25 -07:00
Cameron Sparr
9c5db1057d renaming plugins -> inputs 2016-01-07 15:04:30 -07:00
Cameron Sparr
30d24a3c1c 0.3.0 documentation changes and improvements 2016-01-07 13:02:59 -07:00
Cameron Sparr
ad4af06802 Update Makefile and Godeps and various fixups 2016-01-07 12:33:26 -07:00
Cameron Sparr
64b98a9b61 0.3.0 unit tests: agent and prometheus 2016-01-07 10:52:46 -07:00
Cameron Sparr
4fdcb136bc 0.3.0 unit tests: internal 2016-01-07 10:23:38 -07:00
Cameron Sparr
0e398f5802 0.3.0 unit tests: amon, datadog, librato 2016-01-07 10:09:04 -07:00
Cameron Sparr
b9869eadc3 0.3.0 unit tests: influxdb 2016-01-07 01:11:52 -07:00
Cameron Sparr
936c5a8a7a 0.3.0 unit tests: rethinkdb, twemproxy, zfs 2016-01-06 22:16:04 -07:00
Cameron Sparr
10f19fade1 0.3.0 unit tests: statsd, trig, zookeeper 2016-01-06 18:19:18 -07:00
Cameron Sparr
c01594c2a4 0.3.0 unit tests: rabbitmq, redis 2016-01-06 18:13:00 -07:00
Cameron Sparr
ccbd7bb785 0.3.0 unit tests: procstat, prometheus, puppetagent 2016-01-06 17:56:30 -07:00
Cameron Sparr
6eb49dee5d 0.3.0 unit tests: mysql, nginx, phpfpm, ping, postgres 2016-01-06 17:37:56 -07:00
Cameron Sparr
6a4bf9fcff 0.3.0 unit tests: mailchimp, memcached, mongodb 2016-01-06 17:19:39 -07:00
Cameron Sparr
9ada89d51a 0.3.0 unit tests: jolokia, kafka_consumer, leofs, lustre2 2016-01-06 16:55:28 -07:00
Cameron Sparr
524fddedb4 0.3.0 unit tests: exec, httpjson, and haproxy 2016-01-06 16:11:16 -07:00
Cameron Sparr
c4a7711e02 0.3.0 unit tests: disque and elasticsearch 2016-01-05 23:48:59 -07:00
Cameron Sparr
2e20fc413c 0.3.0 unit tests: aerospike, apache, bcache 2016-01-05 23:48:59 -07:00
Cameron Sparr
498482d0f6 0.3.0 unit tests: system plugins 2016-01-05 23:48:59 -07:00
Cameron Sparr
4bd5b6a4d6 Fix httpjson panic for nil request body 2016-01-05 23:48:59 -07:00
Cameron Sparr
2e764cb22d 0.3.0 Removing internal parallelism: twemproxy and rabbitmq 2016-01-05 23:48:59 -07:00
Cameron Sparr
c8914679b7 0.3.0 Removing internal parallelism: procstat 2016-01-05 23:48:59 -07:00
Cameron Sparr
e25ac0d587 0.3.0 Removing internal parallelism: postgresql 2016-01-05 23:48:59 -07:00
Cameron Sparr
41374aabcb 0.3.0 Removing internal parallelism: httpjson and exec 2016-01-05 23:48:59 -07:00
Cameron Sparr
f60d846eb3 0.3.0 outputs: riemann 2016-01-05 23:48:59 -07:00
Cameron Sparr
96e54ab326 CHANGELOG update 2016-01-05 23:48:59 -07:00
Cameron Sparr
40a3feaad0 0.3.0 outputs: opentsdb 2016-01-05 23:48:59 -07:00
Cameron Sparr
2611931f82 0.3.0 output: librato 2016-01-05 23:48:59 -07:00
Cameron Sparr
ec39d10695 0.3.0 output: datadog and amon 2016-01-05 23:48:59 -07:00
Cameron Sparr
30d8ed411a 0.3.0: mongodb and jolokia 2016-01-05 23:48:59 -07:00
Cameron Sparr
64a832467e 0.3.0: postgresql and phpfpm 2016-01-05 23:48:59 -07:00
Cameron Sparr
9c5321c538 0.3.0 HAProxy rebase 2016-01-05 23:48:59 -07:00
Cameron Sparr
aba123dae0 0.3.0: rethinkdb 2016-01-05 23:48:59 -07:00
Cameron Sparr
5aca58ad2a 0.3.0: zookeeper and zfs 2016-01-05 23:48:59 -07:00
Cameron Sparr
a34418d724 backwards compatibility for io->diskio change 2016-01-05 23:48:59 -07:00
Cameron Sparr
5f4262921a 0.3.0: trig and twemproxy 2016-01-05 23:48:59 -07:00
Cameron Sparr
6fcd05b855 0.3.0 redis & rabbitmq 2016-01-05 23:48:59 -07:00
Cameron Sparr
7746a2b3cd 0.3.0: prometheus & puppetagent 2016-01-05 23:48:59 -07:00
Cameron Sparr
2749dcd128 0.3.0: procstat 2016-01-05 23:48:59 -07:00
Cameron Sparr
92343d91d6 0.3.0: ping, mysql, nginx 2016-01-05 23:48:59 -07:00
Cameron Sparr
ce7b48143a 0.3.0: mailchimp & memcached 2016-01-05 23:48:59 -07:00
Cameron Sparr
e30e98a496 0.3.0: leofs & lustre2 2016-01-05 23:48:59 -07:00
Cameron Sparr
4798bd9d33 0.3.0 httpjson 2016-01-05 23:48:59 -07:00
Cameron Sparr
38d6cb97ad 0.3.0: HAProxy 2016-01-05 23:48:59 -07:00
Cameron Sparr
3be111a160 Breakout JSON flattening into internal package, exec & elasticsearch aggregation 2016-01-05 23:48:59 -07:00
Cameron Sparr
97a66b73cf Updating aerospike & apache plugins for 0.3.0 2016-01-05 23:48:59 -07:00
Cameron Sparr
50fc3ec974 Updating system plugins for 0.3.0 2016-01-05 23:48:59 -07:00
Filippo Vitale
ee8d99b955 fix too restrictive .gitignore
closes #483
2016-01-05 20:30:45 -07:00
Cameron Sparr
5bf7c4d241 Update circleci badge 2016-01-05 14:18:31 -07:00
Aleksei Magusev
c2b5f21832 Fix typo in telegraf.conf
Closes #456.
2016-01-04 01:30:49 +01:00
Cameron Sparr
bdac9b7241 Update 0.3.0 beta links in readme 2015-12-21 13:53:16 -08:00
Cameron Sparr
ec6eae9537 Links for the 0.3.0 beta version 2015-12-19 23:50:05 -07:00
Cameron Sparr
f607074899 remove Name from influxdb unit test 2015-12-18 16:39:23 -07:00
Cameron Sparr
0571eecb0c Remove 'Name' argument from influxdb plugin for 0.3.0 compatibility
closes #449
2015-12-18 16:26:15 -07:00
Mark Rushakoff
4f3d6ddf17 Add influxdb plugin
This was primarily intended to consume InfluxDB-style expvars,
particularly InfluxDB's `/debug/vars` endpoint.

That endpoint follows a structure like

```json
{
  "httpd::8086": {
    "name": "httpd",
    "tags": {
      "bind": ":8086"
    },
    "values": {
      "pointsWrittenOK": 33756,
      "queryReq": 19,
      "queryRespBytes": 26973,
      "req": 428,
      "writeReq": 205,
      "writeReqBytes": 3939161
    }
  }
}
```

There are an arbitrary number of top-level keys in the JSON response at
the configured URLs, and this plugin will iterate through all of their
values looking for objects with keys "name", "tags", and "values"
indicating a metric to be consumed by telegraf.

Running this on current master of InfluxDB, I am able to record nearly
the same information that is normally stored in the `_internal`
database; the only measurement missing from `_internal` is `runtime`,
which is present under the "memstats" key but does not follow the format
and so is not consumed in this plugin.

```
$ influx -database=telegraf -execute 'SHOW FIELD KEYS FROM /influxdb/'

name: influxdb_influxdb_engine
----------------------------
fieldKey
blksWrite
blksWriteBytes
blksWriteBytesC
pointsWrite
pointsWriteDedupe

name: influxdb_influxdb_httpd
---------------------------
fieldKey
pingReq
pointsWrittenOK
queryReq
queryRespBytes
req
writeReq
writeReqBytes

name: influxdb_influxdb_shard
---------------------------
fieldKey
fieldsCreate
seriesCreate
writePointsOk
writeReq

name: influxdb_influxdb_subscriber
--------------------------------
fieldKey
pointsWritten

name: influxdb_influxdb_wal
-------------------------
fieldKey
autoFlush
flushDuration
idleFlush
memSize
metaFlush
pointsFlush
pointsWrite
pointsWriteReq
seriesFlush

name: influxdb_influxdb_write
---------------------------
fieldKey
pointReq
pointReqLocal
req
subWriteOk
writeOk
```
2015-12-18 15:41:16 -07:00
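A minimal sketch of the scan described in the commit above: iterate every top-level key of the /debug/vars-style response and keep only the objects that carry "name", "tags", and "values" keys. Types and error handling are simplified for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// point is the shape the plugin looks for under each top-level key.
type point struct {
	Name   string                 `json:"name"`
	Tags   map[string]string      `json:"tags"`
	Values map[string]interface{} `json:"values"`
}

func main() {
	raw := `{"httpd::8086": {"name": "httpd", "tags": {"bind": ":8086"},
	         "values": {"req": 428, "queryReq": 19}},
	        "memstats": {"Alloc": 12345}}`

	var doc map[string]json.RawMessage
	if err := json.Unmarshal([]byte(raw), &doc); err != nil {
		panic(err)
	}
	for key, val := range doc {
		var p point
		// Objects that don't follow the name/tags/values shape (e.g. memstats)
		// leave the struct empty and are skipped.
		if err := json.Unmarshal(val, &p); err != nil || p.Name == "" || p.Values == nil {
			continue
		}
		fmt.Printf("%s -> measurement %q tags=%v fields=%v\n", key, p.Name, p.Tags, p.Values)
	}
}
```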
chrispeterson
34f0c593ad add additional stats that were already being collected
and rearrange the order to match the index order from the CSV endpoint

add test coverage. add back wretr.  remove check_status from recently added column

closes #445
2015-12-17 15:00:17 -07:00
jipperinbham
97ebcc2af1 close r.Body, remove network metrics, updated other sections as needed
closes #430
closes #452
2015-12-17 14:43:14 -07:00
Cameron Sparr
4852b5c11e Do not rely on external server for amon unit tests 2015-12-11 13:09:09 -07:00
Cameron Sparr
16ce06f621 Use gdm for dependency management 2015-12-11 12:22:16 -07:00
Cameron Sparr
811a54af6c Remove Godeps/ directory 2015-12-11 11:45:20 -07:00
Cameron Sparr
e02973b6f4 Go fmt kinesis output test file 2015-12-11 11:45:07 -07:00
James Lamb
b91eab6737 add amazon kinesis as an output plugin
closes #428
2015-12-11 11:29:03 -07:00
Allen Petersen
c89ef84df7 Separate pool tag and stat collection.
closes #427
2015-12-11 10:59:11 -07:00
Allen Petersen
e3c8a1131a Fix single dataset test.
The "two pool, one metic" test was only passing because of previous calls to Gather() had already populated the values.
2015-12-08 05:53:11 -08:00
Allen Petersen
eb78b9268f Add zfs pool stats collection. 2015-12-08 05:11:41 -08:00
Cameron Sparr
d62e63c448 Telegraf 0.2.4 version bump 2015-12-07 17:01:12 -07:00
Cameron Sparr
03e66d5b87 Implement Glob matching for pass/drop filters 2015-12-07 16:58:31 -07:00
Tait Clarridge
22afc99f1e Add support for pass/drop/tagpass/tagdrop for outputs
Reuses the same logic as the plugins for filtering points; it should be only
a marginal performance decrease to check all the points before writing
to the output.

Added examples to the README as well (for generic pass/drop as well as
output pass/drop/tagpass/tagdrop).

X-Github-Closes #398

closes #398
closes #401
2015-12-04 15:51:53 -07:00
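The two commits above add glob-based pass/drop filtering and extend it to outputs. A minimal sketch of such a filter, using the standard library's path.Match for globbing (the real filter may use a different glob implementation):

```go
package main

import (
	"fmt"
	"path"
)

// shouldPass applies pass/drop semantics: if a pass list is set, the name
// must match at least one of its patterns; if a drop list is set, the name
// must match none of them.
func shouldPass(name string, pass, drop []string) bool {
	if len(pass) > 0 {
		ok := false
		for _, pattern := range pass {
			if m, _ := path.Match(pattern, name); m {
				ok = true
				break
			}
		}
		if !ok {
			return false
		}
	}
	for _, pattern := range drop {
		if m, _ := path.Match(pattern, name); m {
			return false
		}
	}
	return true
}

func main() {
	pass := []string{"cpu*", "mem"}
	drop := []string{"cpu_guest*"}
	fmt.Println(shouldPass("cpu_idle", pass, drop))       // true
	fmt.Println(shouldPass("cpu_guest_nice", pass, drop)) // false
	fmt.Println(shouldPass("disk", pass, drop))           // false
}
```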
Cameron Sparr
c83f220fc4 Resolve gopsutil & unit test issues with net proto stats 2015-12-04 15:12:18 -07:00
Nathaniel Cook
0d0a8e9b68 Add network protocol stats to the network plugin 2015-12-04 14:06:18 -07:00
Cameron Sparr
bcafadb68a Convert uptime to float64 for backwards compatibility.
Fixes #390
2015-12-04 13:47:24 -07:00
Cameron Sparr
9999b2e3c6 Remove from test and test-short in Makefile 2015-12-04 12:27:56 -07:00
Cameron Sparr
6c23fb3173 Mailchimp report plugin 2015-12-04 12:25:16 -07:00
Cameron Sparr
e6517d4140 Update gopsutil godep dependency. Dont use godep go build anymore
godep seems to have a problem when dependencies have `internal`
packages. So removing `godep go build` and `godep go test` from the
build process in favor of just checking out the correct revisions using
`godep restore` into the regular GOPATH.

This basically means that we are not actually using anything within the
Godeps directory except Godeps.json. I should probably make a separate
go dependency management system that does this.
2015-12-04 12:22:16 -07:00
Cameron Sparr
00a6dbbe97 cpu plugin: update LastStats before returning
fixes #388
2015-12-03 16:23:49 -07:00
Cameron Sparr
4cf47dcd0f memcached plugin. Break out metric parsing into its own func
And unit test it using a sample response string. This will make it
easier to see what other metrics are available to the plugin for adding
future metrics.
2015-12-03 13:53:37 -07:00
Cameron Sparr
03863bd84d memcached plugin: support unix sockets
closes #415
2015-12-03 13:25:43 -07:00
Cameron Sparr
7a2eeb7439 Add optional auth credentials to Jolokia plugin
closes #414
2015-12-03 11:48:20 -07:00
Cameron Sparr
6fb7d2883d io plugin, add an 'unknown' tag when the serial number can't be found
closes #405
2015-12-02 13:20:59 -07:00
Carlos J. Torres
a844c1ac74 redis_test.go with instantaneous input/output 2015-12-02 13:17:49 -07:00
Carlos J. Torres
a7b77d9658 add instantaneous input/output to redis plugin. 2015-12-02 13:17:49 -07:00
Mischa Gresser
3509713a23 Adding all memcached stats that return a single value
as described at
https://docs.oracle.com/cd/E17952_01/refman-5.0-en/ha-memcached-stats-general.html

closes #412
2015-12-02 13:14:12 -07:00
Regan Kuchan
4b3b41fea5 Create trig plugin
closes #404
2015-12-01 18:11:03 -07:00
Cameron Sparr
2be7fc072f Don't use panic-happy prometheus client With() function
Deals with part of #405, although it doesn't deal with the underlying
label error that is occurring.
2015-12-01 10:21:36 -07:00
Cameron Sparr
ca222a14de Make Prometheus output tests skipped in short mode.
Because they do some networking.

Fixes #385
2015-12-01 09:45:25 -07:00
Cameron Sparr
4aa94ee290 Update CHANGELOG and README for 0.2.3 2015-11-30 19:08:49 -07:00
Daniel Malon
5c051eb801 Parse statsd lines with multiple metric bits
closes #354
2015-11-30 15:25:35 -07:00
Cameron Sparr
3761f00062 Update etc/telegraf.conf file 2015-11-30 14:28:09 -07:00
Tait Clarridge
b705608b04 Change aerospike plugin server tag to aerospike_host
This is to avoid a conflict with the standard "host" tag that is
used everywhere.

closes #399
2015-11-30 10:43:28 -07:00
Cameron Sparr
a5f2d5ff21 Put Agent Config into the config package 2015-11-30 10:31:31 -07:00
Cameron Sparr
979e5f193a Overhaul config <-> agent coupling. Put config in it's own package. 2015-11-25 19:07:04 -07:00
Cameron Sparr
8dde60e869 Revert much of the newer config file parsing, fix tagdrop/tagpass 2015-11-25 19:06:36 -07:00
Cameron Sparr
224a570a08 Eliminate merging directory structures 2015-11-25 19:06:36 -07:00
Cameron Sparr
78f2ea89f8 Change plugin config to be specified as a list
This makes plugin configuration more similar to output configuration,
where we can specify multiple plugins as a list. The idea behind this is
that the Telegraf agent can handle the multi-processing and error
handling better than each plugin handling that internally. This will
also allow for having different plugin configurations for different
instances of the same type of plugin.
2015-11-25 19:06:36 -07:00
Tero Marttila
13ccf420d7 cmd/telegraf: -configdirectory only includes files ending in .conf
Closes #392
2015-11-25 19:05:51 -07:00
Eduard Carreras
d47740bd8d Add a comment indicating pattern uses pgrep -f 2015-11-25 19:05:22 -07:00
Eduard Carreras
e2aa0e8a35 Use pgrep with a pattern 2015-11-25 19:05:22 -07:00
Tero Marttila
d505be1fd4 cmd/telegraf: -configdirectory only includes files ending in .conf 2015-11-25 18:45:11 -07:00
gotyaoi
40fd33d1b0 GOPATH can have multiple : separated paths in it.
This means that simply adding /bin to the end is not enough. Instead of
setting GOBIN, this version prepends things to the PATH. If GOBIN is
already set, it simply prepends GOBIN to PATH. If not, it appends /bin to
each component of GOPATH, then prepends that to PATH.

closes #386
2015-11-25 18:42:28 -07:00
Cameron Sparr
317a352a65 Skip measurements with NaN fields
fixes #389
2015-11-23 16:03:11 -07:00
Cameron Sparr
970bfce997 Fix kafka plugin and rename to kafka_consumer
fixes #371
2015-11-19 13:41:58 -07:00
Cameron Sparr
a3feddd8ed Riemann output: remove some of the object referencing/dereferencing
closes #378
closes #379
2015-11-18 15:34:05 -07:00
Cameron Sparr
a8294c2c34 Godep: Add raidman riemann client 2015-11-18 14:27:20 -07:00
Jeffrey Allen
0823eed546 Add riemann output
Closes #34
2015-11-18 13:56:22 -07:00
Cameron Sparr
f85bc6e7f7 Update README for 0.2.2 2015-11-18 12:09:09 -07:00
Cameron Sparr
21c4e70f33 Dont append to slices in mergeStruct 2015-11-18 11:48:46 -07:00
gunnaraasen
03a6f28d55 Use 'CREATE DATABASE IF NOT EXISTS' syntax
closes #376
2015-11-18 00:41:25 -07:00
Cameron Sparr
19e5d975ca Updating CHANGELOG and README for version 0.2.1 2015-11-16 16:23:50 -07:00
Cameron Sparr
5664625f67 Update README, CHANGELOG, and unit tests with list output 2015-11-16 10:43:03 -07:00
Daniel Malon
375045953f FreeBSD compatibility
- Use gopsutil instead of gosigar
- Bump go-dockerclient

closes #372
2015-11-16 10:32:58 -07:00
Cameron Sparr
b10b186cc8 Allow users to specify outputs as lists
This will provide the ability to specify multiple outputs for a single
type of output.

In essence, allowing this:

[outputs]

[[outputs.influxdb]]
  urls = ["udp://localhost:8089"]
  database = "udp-telegraf"

[[outputs.influxdb]]
  urls = ["http://myhost:8086"]
  database = "telegraf"

[[outputs.kafka]]
  brokers = ["192.168.99.100:9092"]
  topic = "telegraf"

closes #335
2015-11-16 10:01:28 -07:00
Cameron Sparr
bf8e0f4cae CHANGELOG update 2015-11-13 14:42:21 -07:00
Cameron Sparr
a6ae597dfc MQTT output unit tests w/ docker container 2015-11-13 13:42:06 -07:00
Cameron Sparr
b975419bc7 Apache plugin unit tests and README 2015-11-13 13:01:00 -07:00
Cameron Sparr
0f036d6bec InfluxDB output: add tests and a README 2015-11-13 10:42:35 -07:00
Codeb Fan
20fbfc7006 Twemproxy go fmt and bug fixups, CHANGELOG, README
closes #365
2015-11-13 09:43:48 -07:00
Codeb Fan
e167b72b16 Add plugin for Twemproxy
This plugin collects data from Twemproxy's stats interface
2015-11-13 09:40:29 -07:00
Cameron Sparr
68ef07bff6 Update CHANGELOG with UDP output 2015-11-12 16:02:46 -07:00
Cameron Sparr
10a20e208a Godep update and dependency resolution 2015-11-12 15:20:01 -07:00
Cameron Sparr
e10394ba3b Use the UDP client for writing to InfluxDB 2015-11-12 14:52:35 -07:00
鲁晓敏
019585f0db phpfpm: add socket fcgi support 2015-11-12 10:44:51 -07:00
鲁晓敏
e619845ffe measurement name should have prefix before ShouldPass check 2015-11-12 10:43:50 -07:00
Cameron Sparr
3012928452 Fix config file tab indentation 2015-11-12 09:52:35 -07:00
Cameron Sparr
352ccde52b Fix new error return of client.NewPoint 2015-11-11 15:38:22 -07:00
Cameron Sparr
92fb51026a Godep update: gopsutil 2015-11-11 15:38:22 -07:00
Cameron Sparr
acf9c1141a Change duration -> internal and implement private gopsutil methods 2015-11-11 15:38:22 -07:00
Cameron Sparr
a8bcc51071 Godep update: influxdb 2015-11-11 15:38:22 -07:00
Cameron Sparr
dcd1c6766c Godep save: gopsutil 2015-11-11 15:38:22 -07:00
Cameron Sparr
00ee2529bc Revert "redis: support IPv6 addresses with no port"
This reverts commit 2af97cdbcb.
2015-11-11 15:33:58 -07:00
Nicholas Katsaros
2af97cdbcb redis: support IPv6 addresses with no port
closes #356
2015-11-10 10:02:42 -07:00
martinrusev
1accab02ed Amon output
closes #350
2015-11-09 10:44:28 -07:00
Roman Statsevich
1a05899be0 removed "panic" from zfs plugin
also added zfs plugin to README.md

closes #341
2015-11-09 10:38:33 -07:00
Roman Statsevich
d54f6be639 add ZFS plugin 2015-11-09 10:37:36 -07:00
Subhachandra Chandra
00614026b3 Added parameters "Devices" and "SkipSerialNumber" to DiskIO plugin.
"Devices" can be used to specify storage devices on which stats
should be reported. "SkipSerialNumber" can be used to omit
the device serial number.

Added tests to verify the new parameters.

closes #344
2015-11-06 17:11:57 -07:00
saiello
acf1da4d30 Added jolokia README.md
closes #337
2015-11-06 14:08:07 -07:00
saiello
921ffb7bdb Test for jolokia plugin 2015-11-06 14:07:02 -07:00
saiello
b2e22cbc59 Add fields value test methods 2015-11-06 14:07:02 -07:00
saiello
55c598f9ff Create a JolokiaClient, allowing a stub implementation to be injected 2015-11-06 14:07:02 -07:00
saiello
eabc0875de Fixed sampleconfig 2015-11-06 14:07:02 -07:00
saiello
62270a3697 go fmt run over jolokia.go 2015-11-06 14:07:02 -07:00
saiello
40d8aeecb0 Use url.Parse to validate configuration params 2015-11-06 14:07:02 -07:00
saiello
2daa9ff260 Added Tags as toml field 2015-11-06 14:07:02 -07:00
Simone Aiello
25fd4297a8 Jolokia plugin first commit 2015-11-06 14:07:02 -07:00
cornerot
f05d89ed72 removed "panic" from bcache plugin
closes #343
2015-11-06 14:05:09 -07:00
Sean Beckett
4c2501be95 updating Golang crypto 2015-11-04 15:30:01 -08:00
Cameron Sparr
e2854232d0 Change HAProxy plugin tag from host to server
fixes #342
2015-11-03 11:21:58 -07:00
Cameron Sparr
6794fd06eb Suggest running as telegraf user in test mode in README
Fixes #330
2015-11-03 11:18:57 -07:00
Cameron Sparr
befc906167 Improve the HTTP JSON plugin README with more examples. 2015-11-03 10:16:59 -07:00
Cameron Sparr
422d240afb Mongodb should output 2 plugins in test mode
closes #336
2015-11-02 17:23:40 -07:00
Cameron Sparr
2b966b40f2 Completely tab-indent the Makefile 2015-11-02 14:32:06 -07:00
Sean Reifschneider
a992e16f7d On a package upgrade, restart telegraf.
closes #338
2015-11-02 13:01:58 -07:00
Cameron Sparr
0398dc1226 Dont overwrite 'host' tag in redis plugin
fixes #331
2015-11-02 11:30:49 -07:00
Eugene Dementiev
5592738603 [rabbitmq plugin] Add support for per-queue metrics
Also, metrics are now gathered concurrently across servers. Fixes #185

fixes #185
closes #334
2015-11-02 11:13:24 -07:00
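The commit above also makes the plugin gather from all configured servers concurrently. A minimal sketch of that pattern with a WaitGroup and an error channel; gatherServer is a hypothetical stand-in for the per-server work:

```go
package main

import (
	"fmt"
	"sync"
)

// gatherServer stands in for the real per-server collection.
func gatherServer(url string) error {
	fmt.Println("gathering from", url)
	return nil
}

// gatherAll launches one goroutine per server, waits for all of them, and
// reports the first error seen, if any.
func gatherAll(servers []string) error {
	var wg sync.WaitGroup
	errs := make(chan error, len(servers))

	for _, s := range servers {
		wg.Add(1)
		go func(server string) {
			defer wg.Done()
			if err := gatherServer(server); err != nil {
				errs <- err
			}
		}(s)
	}
	wg.Wait()
	close(errs)

	for err := range errs {
		return err
	}
	return nil
}

func main() {
	_ = gatherAll([]string{"http://localhost:15672", "http://rabbit2:15672"})
}
```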
Eugene Dementiev
688ffd024b [amqp output] Add ability to specify influxdb database
and retention policy, as well as precision as amqp headers

closes #333
2015-11-02 11:12:09 -07:00
JP
4ac1c819e0 add elasticsearch README
closes #327
2015-11-02 11:04:43 -07:00
JP
a6e0ae2896 add ValidateTaggedFields func to testutil accumulator 2015-11-02 11:03:41 -07:00
JP
cb8499c264 optionally gather cluster and index health stats 2015-11-02 11:03:41 -07:00
Cameron Sparr
d2fb065d0d Prometheus client test refactor
closes #318
2015-10-28 16:25:15 -06:00
Tait Clarridge
4449f7f2fb Add prometheus_client service output module, update prometheus client
- Adds a client implementation using the prometheus go_client library
  that exposes metrics.

- Adds a new type of output "ServiceOutput" which follows inline with
  the "ServicePlugin", adding a Stop and Start method for the service

This change also requires the newer prometheus/client_golang code, so
the prometheus plugin needed to be changed.

Added the following to Godep:
    - bitbucket.org/ww/goautoneg (in github.com/common/expfmt/encode.go)
    - prometheus/common/expfmt (in plugins/prometheus.go)
    - github.com/prometheus/common/model (in plugins/prometheus.go)
    - github.com/prometheus/procfs (in github.com/client_golang/prometheus)
    - github.com/beorn7/perks/quantile (in github.com/client_golang/prometheus)

X-Github-Meta: closes #306
2015-10-28 15:28:39 -06:00
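The commit above introduces a ServiceOutput type that adds Start and Stop on top of the normal output methods, so an output can run something long-lived such as a metrics listener. A sketch of what such an interface split can look like; method names and shapes are inferred from the commit message, not copied from the Telegraf source:

```go
package main

import "fmt"

// Output is a plain output: connect, write batches, close.
type Output interface {
	Connect() error
	Close() error
	Write(metrics []string) error // []telegraf.Metric in the real code
}

// ServiceOutput is an Output that also runs a long-lived service (here, an
// HTTP listener for Prometheus to scrape), so it needs Start and Stop hooks.
type ServiceOutput interface {
	Output
	Start() error
	Stop()
}

type prometheusClient struct{ listen string }

func (p *prometheusClient) Connect() error               { return p.Start() }
func (p *prometheusClient) Close() error                 { p.Stop(); return nil }
func (p *prometheusClient) Write(metrics []string) error { return nil }
func (p *prometheusClient) Start() error {
	fmt.Println("starting listener on", p.listen)
	return nil
}
func (p *prometheusClient) Stop() { fmt.Println("stopping listener") }

func main() {
	var out ServiceOutput = &prometheusClient{listen: ":9126"} // illustrative address
	out.Start()
	out.Write([]string{"cpu usage_idle=99"})
	out.Stop()
}
```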
JP
7cc60dfb8f update mongostat from github.com/mongodb/mongo-tools
closes #323
2015-10-28 15:26:04 -06:00
Cameron Sparr
028bae8f04 Run make in circle, don't build arm and 32-bit 2015-10-28 12:30:58 -06:00
Cameron Sparr
fa9555c430 Execute "long" unit tests using docker containers
fixes #293
2015-10-28 11:45:04 -06:00
Cameron Sparr
48d11f0a5c Mongostat diff bug, less equal to less 2015-10-28 10:44:09 -06:00
Cameron Sparr
09a0c3b40f Update README & CHANGELOG with docker and NSQ changes 2015-10-27 15:47:27 -06:00
Jonathan Cross
e622bd5e7f fixing test for NoError
closes #325
2015-10-27 15:44:22 -06:00
Jonathan Cross
0d31f40e16 use index 0 of server array for nsq test 2015-10-27 15:44:22 -06:00
Jonathan Cross
e13500fc4f updated for new output Write function
removed HTTP listener port in docker compose. Not being used by plugin.
2015-10-27 15:44:22 -06:00
Jonathan Cross
2a76942a74 NSQ Output plugin
NSQ output plugin, following the NSQ methodology output is a producer
to one instance of NSQD. The go library does not accept array values be
default for a Producer. Additionally service discovery is generally
done as a consumer.

Follows same methodology as Kafka Output without the tag reference.
2015-10-27 15:44:22 -06:00
Cameron Sparr
c73c28de7e Update CHANGELOG with version 0.2.0 2015-10-27 15:43:47 -06:00
Ellison Marks
9e0ec0927c Making sure telegraf.d directory is created by packages. 2015-10-27 11:32:00 -07:00
Ellison Marks
23e6715a02 Making the field name matching when merging respect the toml struct tag.
If the field has a toml struct tag, don't try fuzzy matching, thanks to
@ekini.
2015-10-27 11:31:43 -07:00
Cameron Sparr
f7eae86cdb Update README to version 0.2.0 2015-10-26 22:05:27 -06:00
Cameron Sparr
889c0a50a4 Fixup random interval jittering 2015-10-26 13:34:31 -06:00
JP
7d15061984 add librato output plugin, update datadog plugin to skip non-number metrics
closes #322
2015-10-26 13:29:53 -06:00
Tait Clarridge
ccbfb038ee Change aerospike default config to localhost
The default config was in a non-runnable state if one were to
attempt to use it with the docker-machine setup. Changed to localhost.

closes #321
2015-10-26 10:57:10 -06:00
palkan
cb951ebd28 Add httpjson readme
closes #275
2015-10-23 18:34:27 -06:00
palkan
d35c78e933 Rename Tags to TagKeys 2015-10-23 18:33:04 -06:00
palkan
e9356c893b [Fix #190] Add httpjson tags support 2015-10-23 18:33:04 -06:00
JP
869483617b add host to metric, replace '_' with '.'
closes #312
2015-10-23 18:25:26 -06:00
palkan
df96958fb8 Use specific mysql version with docker
closes #315
2015-10-23 17:35:49 -06:00
palkan
de7ad9dfbc Replace opentsb docker image with the official one
closes #314
2015-10-23 17:34:12 -06:00
palkan
bf1cf4557e Update kafka readme; improve integration tests
closes #313
2015-10-23 17:33:23 -06:00
Cameron Sparr
86d20496ea Fix MySQL DSN -> tags parsing
Closes #297
2015-10-22 17:16:19 -06:00
Cameron Sparr
ae7ad2230f Support printing output with usage flag too 2015-10-22 14:24:51 -06:00
Ellison Marks
2007064c47 Fix for tags in the config not being applied to the agent.
fixes #302
closes #308
2015-10-22 13:58:59 -06:00
Cameron Sparr
c8852339c9 Do not fail Connect() in influxdb output when db creation fails
Fixes #304
2015-10-22 11:14:10 -06:00
Cameron Sparr
eb0a19062e When MongoDB freezes or restarts, do not report negative diffs
Fixes #253
2015-10-22 10:55:26 -06:00
Cameron Sparr
2f08577967 Fix output panic for -test flag 2015-10-21 18:32:43 -06:00
Cameron Sparr
891f3af504 Update CHANGELOG & README with aerospike plugin 2015-10-21 18:08:43 -06:00
Tait Clarridge
c5f200917a Add aerospike plugin support
- Does not use the aerospike client, but sends the stats command
  using the format aerospike requires
- Queries available namespaces and gets stats for all of them

closes #300
2015-10-21 18:04:45 -06:00
Cameron Sparr
21622a1a17 Update CHANGELOG with new flushing options 2015-10-21 17:37:15 -06:00
Cameron Sparr
a1067fa4ae Normalize collection interval to nearest interval
closes #301
2015-10-21 17:31:27 -06:00
Ellison Marks
4395a46190 Tests for LoadDirectory.
closes #295
2015-10-21 14:07:09 -06:00
gotyaoi
c938523cd5 Implementing LoadDirectory. 2015-10-21 12:00:22 -07:00
gotyaoi
ae10fc7fb4 Fixing old tests and adding new ones for new code. 2015-10-21 12:00:21 -07:00
gotyaoi
0299a17da1 Moving the Duration wrapper to its own package to break import loops. 2015-10-21 12:00:21 -07:00
gotyaoi
d77cfd6ecc Adding testify/suite to godep. 2015-10-21 11:59:20 -07:00
gotyaoi
03d79996de Moving away from passing around *ast.Tables.
Config in the config directory will need to be merged into the main
config, which is difficult to do using the *ast.Tables. Get the config
into structs as soon as possible and then merge the structs.
2015-10-21 11:59:19 -07:00
Eugene Dementiev
553208a960 Combine BatchPoints with the same RoutingTag into one message in amqp output
closes #287
2015-10-21 11:53:08 -06:00
Cameron Sparr
dfc59866e8 Add support for retrying output writes, using independent threads
Fixes #285
2015-10-21 11:17:01 -06:00
Cameron Sparr
ac685d19f8 Clean up logging messages and add flusher startup delay
Fixes #294
2015-10-20 16:45:31 -06:00
Joseph Dykstra
dd2e9e08df Add periods to the end of sentences
Closes #288
2015-10-20 14:20:30 -06:00
Roman Statsevich
499b5befd6 add bcache plugin
Closes #286
2015-10-20 14:17:09 -06:00
Cameron Sparr
c26ce9c4fe Utilizing new client and overhauling Accumulator interface
Fixes #280
Fixes #281
Fixes #289
2015-10-20 13:53:58 -06:00
Cameron Sparr
6263bc2d1b Godep update: influxdb 2015-10-20 10:17:34 -06:00
Cameron Sparr
f7504fb5eb InfluxDB does not accept uint64, so cast them down to int64
Fixes #290
2015-10-19 18:53:40 -06:00
Tyler Nisonoff
6869362f43 added keyspace hitrate measurement
Closes #283
2015-10-18 18:00:34 -06:00
Tyler Nisonoff
7600cc87d8 added connections measurement with user tag
Closes #284
2015-10-18 17:43:36 -06:00
Jonathan Cross
3192c78d96 fixed test to check actual value
Closes #273

caught a typo :D using it
2015-10-18 17:39:53 -06:00
Jonathan Cross
c3dad00c1b PuppetAgent Plugin
Added PuppetAgent plugin that reads the last_run_summary file 2015-10-18 17:37:11 -07:00
2015-10-18 17:37:11 -06:00
Cameron Sparr
a1bad378d2 Turn off GOGC for faster build time in CI 2015-10-18 15:56:47 -06:00
Cameron Sparr
73f1ed4f25 Use Unix() int64 time for comparing timestamps in kafka consumer 2015-10-16 16:58:52 -06:00
Cameron Sparr
b28b4bd71e Fix ApplyTemplate change in graphite parser 2015-10-16 16:43:31 -06:00
Cameron Sparr
b15928c95e godep update: influxdb 2015-10-16 16:26:58 -06:00
Cameron Sparr
97d4f9e0ff Run go fmt in CI 2015-10-16 13:08:32 -06:00
Cameron Sparr
0986caf0ad Fix Go vet issue, test accumulator should be passed by reference with lock
Closes #276
2015-10-16 11:21:44 -06:00
palkan
9cccf8f88a Add locking to test accumulator 2015-10-16 10:58:46 -06:00
Joseph Dykstra
3ae5b4b280 Fix typos
Closes #270
2015-10-16 10:54:51 -06:00
Oskar Risberg
62b0e25b84 Add phpfpm to readme
Closes #274
2015-10-16 10:53:36 -06:00
Cameron Sparr
4e5ed9d3b9 Change config file indentation to 2 spaces 2015-10-15 15:53:29 -06:00
Sean Reifschneider
555436a222 Fix for init script for other procs with "telegraf"
The init script fails if another process has the word "telegraf" in
it, for example if you are running "vi /etc/opt/telegraf/telegraf.conf"
or "tail -f /var/log/telegraf/telegraf.log".  This is because
the "-f" flag to "pgrep" will show processes with the search
string anywhere in the command-line.

This patch turns it around and gets the "ps" output for the process
in the pidfile, and if that line has "telegraf" in it, it considers
it to be running.

Closes #266
Closes #267
2015-10-15 15:06:05 -06:00
Cameron Sparr
6977119f1e Statsd plugin, tags and timings
Closes #237
Closes #39
2015-10-15 12:07:36 -06:00
Cameron Sparr
52be516fa3 wget and install go1.5.1 on machine 2015-10-14 17:54:00 -06:00
Cameron Sparr
2dd3eee58e Use graphite parser for templating, godep update to head 2015-10-14 17:54:00 -06:00
Cameron Sparr
d40351286a Refactoring gauges to support floats, unit tests 2015-10-14 17:54:00 -06:00
Cameron Sparr
d84a258b0a Statsd: unit tests for gauges, sets, counters 2015-10-14 17:54:00 -06:00
Cameron Sparr
eb2a4dc724 Statsd listener plugin
implement gauges, sets, counters
2015-10-14 17:54:00 -06:00
Cameron Sparr
316fa1cc01 Add recently-added plugins to list 2015-10-14 17:53:47 -06:00
Sean Reifschneider
04e2db1f41 Issue #264: Fixes for logrotate config file.
This adds copytruncate and dateext to match the Influxdb logrotate file,
and removes nocreate (again, to match influxdb).

Closes #264
Closes #265
2015-10-14 17:51:37 -06:00
Jonathan Cross
2f7d781635 remove zookeeper declaration
since spotify/kafka docker image already exposes zookeeper

Closes #262
2015-10-14 17:49:23 -06:00
Jonathan Cross
88ff269370 added measurement prefix 2015-10-14 17:48:21 -06:00
Jonathan Cross
7121e1a3b0 fixes based on comments 2015-10-14 17:48:21 -06:00
Jonathan Cross
8fd06b96d7 Zookeeper plugin
Created a zookeeper plugin that fetches from the ‘mntr’ command and will
output measurements that are int and string based
2015-10-14 17:48:21 -06:00
Cameron Sparr
181c3cdc28 Update CHANGELOG with recent bugfixes
Closes #261
2015-10-13 17:58:17 -06:00
Eugene Dementiev
ccfa913186 Fix crash if login/password is incorrect in rabbitmq plugin. Closes #260
Closes #260
2015-10-13 17:54:29 -06:00
Eugene Dementiev
2a9f31bfea Add sample for exec plugin. Fixes #245
Closes #258
2015-10-13 17:53:18 -06:00
Vinh
0bc76f094a Add PHPFPM stat
- HTTP status or Socket status
- Collect those metric:
    accepted conn:
    listen queue:
    max listen queue:
    listen queue len:
    idle processes:
    active processes:
    total processes:
    max active processes:
    max children reached:
    slow requests:
- Tag metric with: `host` and `pool` name

Closes #255
2015-10-12 15:40:42 -06:00
Shirou WAKAYAMA
d394003739 add UDP socket counts and rename to 'netstat'.
Closes #244
2015-10-12 00:08:35 -06:00
Shirou WAKAYAMA
17dd058308 add README about TCP Connection plugin. 2015-10-12 00:05:10 -06:00
Shirou WAKAYAMA
99b1a3071d add NetConnections to the mockPS. 2015-10-12 00:05:10 -06:00
Shirou WAKAYAMA
dc38e448bd add tcp connections stat plugin. 2015-10-12 00:05:10 -06:00
Michael Bushey
1d1180ec0c telegraf-agent.toml: Fix example port and use complete examples for mysql plugin 2015-10-09 15:52:46 -07:00
Cameron Sparr
81539c4ed6 Merge pull request #252 from aristanetworks/master
Added Mountpoints option to system/disk plugin to report stats for selected mountpoints
2015-10-09 14:23:18 -06:00
subhachandrachandra
cf1dcfe37c Dropped SkipInodeUsage option as "drop" achieves the same results.
Fixed a bug in restricting Disk reporting to specific mountpoints
Added tests for the Disk.Mountpoints option
Fixed minor bug in usage of assert for the cpu tests where expected and actual values were swapped.
2015-10-08 14:17:04 -07:00
Cameron Sparr
7293376973 Race condition fix: copy BatchPoints into goroutine
Fixes #250
2015-10-08 14:27:22 -06:00
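The fix above is the classic Go pattern of handing a goroutine its own copy of the data instead of letting it capture a shared, reused batch by reference. A minimal sketch of that idea (strings stand in for BatchPoints):

```go
package main

import (
	"fmt"
	"sync"
)

// flush receives its own snapshot of the batch as an argument, so it cannot
// race with the caller reusing or clearing the original slice.
func flush(points []string, wg *sync.WaitGroup) {
	defer wg.Done()
	fmt.Println("writing", points)
}

func main() {
	var wg sync.WaitGroup
	batch := []string{"cpu usage_idle=99"}

	// Fix: copy the batch and hand the copy to the goroutine.
	snapshot := make([]string, len(batch))
	copy(snapshot, batch)
	wg.Add(1)
	go flush(snapshot, &wg)

	// The original batch can now be reused without racing the writer.
	batch = batch[:0]
	wg.Wait()
}
```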
Cameron Sparr
d9f1a60a64 godep update: gopsutil 2015-10-07 16:12:55 -06:00
subhachandrachandra
4f6526e1a5 Merge remote-tracking branch 'upstream/master' 2015-10-07 14:49:47 -07:00
subhachandrachandra
e6ea09f482 Added Mountpoints and SkipInodeUsage options to the Disk plugin to control
which mountpoints stats are reported for and to skip inode stats. 2015-10-07 14:42:11 -07:00
2015-10-07 14:42:11 -07:00
Cameron Sparr
d620651ef6 procstat plugin, consolidate PID-getting 2015-10-07 14:13:33 -06:00
Cameron Sparr
9221f93be9 Allow procstat plugin to handle multiple PIDs from pgrep
Closes #248
2015-10-07 13:48:55 -06:00
Cameron Sparr
795ea49093 Add pid tag to procstat plugin, dont exit on error, only log 2015-10-07 11:42:50 -06:00
Ranjib Dey
6827459b9f fix typo in sample config and README
Closes #240
2015-10-07 11:19:55 -06:00
Ranjib Dey
e424d47ce6 fix plugin registration name 2015-10-07 11:11:47 -06:00
Ranjib Dey
ca0e732331 fix toml struct string 2015-10-07 11:11:47 -06:00
Ranjib Dey
8e52905ea9 add readme for procstat plugin 2015-10-07 11:11:12 -06:00
Cameron Sparr
5cc26bb640 godep update for procstat 2015-10-07 11:11:12 -06:00
Ranjib Dey
fdf00c1be6 Monitor process by pidfile or exe name 2015-10-07 11:11:12 -06:00
Cameron Sparr
47258a7093 Godep update: gopsutil 2015-10-07 10:41:44 -06:00
cornerot
5112d077d5 add tabs in the apache sampleConfig var
Closes #246
2015-10-06 10:34:03 -06:00
Cameron Sparr
b4e8a23da4 godep update: gopsutil 2015-10-05 11:10:24 -06:00
Shirou WAKAYAMA
63e9a4ae68 Fix godeps for MQTT output and remove hostname setting
Closes #241
2015-10-05 10:56:43 -06:00
Shirou WAKAYAMA
7e96a9afda Change MQTT output topic format to split plugin name. 2015-10-05 10:43:46 +09:00
Shirou WAKAYAMA
f5a225f1e0 update Godep.json 2015-10-04 23:57:40 +09:00
Shirou WAKAYAMA
6f4a3816a5 Add MQTT output. 2015-10-04 22:52:29 +09:00
subhachandrachandra
29363794c1 Merge remote-tracking branch 'upstream/master' 2015-09-30 17:02:58 -07:00
Cameron Sparr
64a3a718e6 CHANGELOG feature updates 2015-09-29 14:15:23 -07:00
Cameron Sparr
b01c28ebc6 Clean up additional logging and always print basic agent config 2015-09-29 14:06:49 -07:00
Cameron Sparr
f5d1aaf7d9 Memory plugin: re-add cached and buffered to memory plugin 2015-09-28 17:05:42 -07:00
Cameron Sparr
f6f45881da Add more logging to telegraf 2015-09-28 16:57:03 -07:00
Nick Jones
cd7468f3be Fix conditional test against useradd so it's compatible with Dash
The test to see which version of `useradd` is installed uses 'bashisms'
that fail on Ubuntu due to the fact that `/bin/sh` is symlinked to Dash,
causing the telegraf account to be created without the `--system` option
ever being passed.

This change amends the syntax so that it's POSIX-compatible and more
portable as a result.
2015-09-28 14:04:46 +01:00
subhachandrachandra
cd93b9ae0b Merge remote-tracking branch 'upstream/master'
Conflicts:
	Godeps/_workspace/src/github.com/shirou/gopsutil/mem/mem_darwin.go
	Godeps/_workspace/src/github.com/shirou/gopsutil/mem/mem_linux.go
2015-09-25 15:19:11 -07:00
Eugene Dementiev
0ffaafd788 Fix packages provides: now new version of package replaces the old one 2015-09-24 13:29:37 -07:00
Eugene Dementiev
c6283d1b5a AMQP auto reconnect feature
Closes #207
2015-09-24 10:30:00 -07:00
Josh Palay
24527859e6 Fix printf format issue
Closes #227
2015-09-23 15:44:25 -07:00
Josh Palay
0c6c5718fe Adds command intervals to exec plugin 2015-09-23 15:42:48 -07:00
Ruslan Islamgaliev
c4bbc18cb6 Make nginx_test check port in nginx module tags
Closes #223
2015-09-23 15:41:46 -07:00
Ruslan Islamgaliev
6e76759225 Add port tag to nginx plugin 2015-09-23 15:40:43 -07:00
Cameron Sparr
87ed2d4a21 Update CHANGELOG with ekini's changes and docker plugin
Closes #226
2015-09-23 15:37:21 -07:00
Eugene Dementiev
74b3309225 Add timestamps to points in Kafka/AMQP outputs 2015-09-23 15:31:37 -07:00
Cameron Sparr
1d741cbfc5 Update godep of go-dockerclient for Label access 2015-09-23 14:25:26 -07:00
Cameron Sparr
12420db4b9 docker plugin: Add docker labels as tags in
Closes #90
2015-09-23 14:20:15 -07:00
Cameron Sparr
aad6a7e262 Only run the cpu plugin twice when using -test
This can be easily extended to other plugins that need it by tacking
their name onto the switch statement. Also eliminating the unused
TestAllPlugins code and cleaning up some stray Printlns
2015-09-23 13:56:14 -07:00
Cameron Sparr
b12b804f0a Make redis password config more clear.
Also make certain that the 'host' tag does not include the password for
security reasons

Closes #225
2015-09-23 13:28:36 -07:00
Cameron Sparr
64d38ed17e Remove duplicate opentsdb docker images 2015-09-22 21:10:21 -06:00
Cameron Sparr
f8d64a7378 Redis: include per-db keyspace info
Closes #205
2015-09-22 19:46:50 -07:00
Cameron Sparr
b92a0d5126 Redis plugin, add key metrics and simplify parsing 2015-09-22 16:27:22 -07:00
Cameron Sparr
e0372358df Update changelog with info about filtering 2015-09-22 16:09:24 -07:00
Cameron Sparr
1bce6e3faf Updating README and CHANGELOG for 0.1.9 2015-09-22 13:43:37 -07:00
Cameron Sparr
81dd281789 Remove gvm from packaging script 2015-09-22 11:46:51 -07:00
Cameron Sparr
f7b38dc270 Update deb/rpm package config, package script 2015-09-22 10:56:01 -07:00
Cameron Sparr
ec9819071a Add -outputfilter flag, and refactor the filter flag to work for -sample-config
Closes #211
Issue #199
2015-09-22 10:56:01 -07:00
Ruslan Islamgaliev
72edc3c4fe Select default apache port depending on url scheme 2015-09-22 10:53:53 -07:00
Ruslan Islamgaliev
5657e8d1da Add port tag to apache plugin 2015-09-22 10:52:45 -07:00
Cameron Sparr
0700e0cf94 Update gopsutil godep dependency
Closes #219
2015-09-22 09:28:37 -07:00
Cameron Sparr
1cd2db9f8c Memory plugin: use 'available' instead of 'actual_'
Closes #214
2015-09-21 17:39:39 -07:00
Cameron Sparr
10d411c4f7 Update new memory unit tests, documentation 2015-09-21 17:22:24 -07:00
Cameron Sparr
167b8b8eb8 Godep update gopsutil to get darwin mem fix 2015-09-21 17:22:24 -07:00
Cameron Sparr
74da03d9fa Refactor memory stats, remove some, add 'actual_' stats 2015-09-21 17:22:23 -07:00
Cameron Sparr
b8a58dad65 Fix CPU unit tests for time_ prefix 2015-09-21 17:08:50 -07:00
Cameron Sparr
b012713cf2 Adding time_ prefix to all CPU time measurements 2015-09-21 10:23:46 -07:00
Cameron Sparr
82d914149e Adding a retry to the initial telegraf database connection
Fixes #187
2015-09-18 18:03:47 -07:00
Eugene Dementiev
450f5e03a5 Add shebang to postinstall script (fixes installation on Debian family)
Closes #212
2015-09-18 15:07:11 -07:00
Eugene Dementiev
b04706b875 Fix makefile warning for go1.5 2015-09-18 21:16:08 +03:00
Cameron Sparr
10b0438201 Remove cpu_usage_busy, this is simply 100-cpu_usage_idle 2015-09-17 17:46:35 -07:00
Cameron Sparr
0270ace3d4 Add a CPU collection plugin README 2015-09-17 17:46:34 -07:00
Cameron Sparr
bbb27fa484 Update gopsutil dependency to enable 32-bit builds 2015-09-17 17:46:34 -07:00
Cameron Sparr
df15e7b379 Remove non-existent 'stolen' cpu stat, fix measurement names 2015-09-17 17:46:34 -07:00
Cameron Sparr
df651ab98e Properly vendor the gopsutil dependency 2015-09-17 17:46:34 -07:00
Cameron Sparr
dd7a3b37b0 Delete 'vendored' gopsutil directory 2015-09-17 17:46:34 -07:00
Tim Allen
94a623c00e Check if file exists before running disk usage on it. Not all mounts are normal files.
Closes #208
2015-09-17 17:45:58 -07:00
Cameron Sparr
6cb0f2d392 Revert godep updates, needs a fix in influxdb repo 2015-09-17 00:57:39 -07:00
Cameron Sparr
17e165382f Add amqp/rabbitmq to output list in readme 2015-09-16 17:30:10 -07:00
Cameron Sparr
733ba07312 Changing AddValues to AddFields and temp disabling adding w time
Currently adding with time is broken, because InfluxDB does not support
using precision for timestamp truncation both with and without
timestamps. This will be re-enabled once we fix InfluxDB to use the
precision argument for truncation in all cases, and a "unit" argument
in the line-protocol for adding points with non-nanosecond stamps

Fixes #175
2015-09-16 16:59:48 -07:00
Cameron Sparr
46cd9ff9f5 Update influxdb godeps for line-protocol precision fix 2015-09-16 16:59:48 -07:00
Cameron Sparr
66ed4f7328 mysql plugin: don't emit blank tags
closes #201
2015-09-16 14:24:38 -07:00
Cameron Sparr
3be6d84675 Catching up on some CHANGELOG updates 2015-09-16 14:23:57 -07:00
Eugene Dementiev
406e980fae install and init script for el5
Fixes #186
Closes #203
2015-09-16 14:19:57 -07:00
Oliver Buschjost
211065565f Add HTTP 5xx stats to HAProxy plugin. Closes #194 2015-09-16 14:10:09 -07:00
Cameron Sparr
d979ee5573 AMQP routing tag doc & add routing tag for Kafka
closes #200
2015-09-16 12:10:26 -07:00
Roman Plessl
c843b53c30 added docker image unit test with OpenTSDB 2015-09-16 11:01:17 -07:00
Eugene Dementiev
5d280e4d25 AMQP output plugin typo fixes and added README and RoutingTag 2015-09-16 10:59:29 -07:00
Eugene Dementiev
f00d43aa09 Added amqp output 2015-09-16 10:58:38 -07:00
Cameron Sparr
2e68d3cb3c Merge pull request #198 from mced/fix_mem_used_perc
[fix] mem_used_perc returns percentage of used mem
2015-09-15 15:24:48 -07:00
Cédric Menassa
4d6f11b61f [fix] mem_used_perc returns percentage of used mem 2015-09-15 12:58:51 +02:00
Kevin Bouwkamp
aac9ba6c1e add bugfix in CHANGELOG and some notes in pg README
Closes #192
2015-09-14 18:48:01 -07:00
Kevin Bouwkamp
d926a3b5da no longer duplicate ignored columns here 2015-09-14 18:47:07 -07:00
Kevin Bouwkamp
fa5753c579 Makes the test also work across pg versions 2015-09-14 18:47:07 -07:00
Kevin Bouwkamp
3fa3b2d836 add some comments 2015-09-14 18:47:07 -07:00
Kevin Bouwkamp
76041e84e8 fix some more indentation... 2015-09-14 18:47:07 -07:00
Kevin Bouwkamp
19c6572926 Add a few notes about the connection strings 2015-09-14 18:47:07 -07:00
Kevin Bouwkamp
2217fb8c58 uncomment to skip test in short mode 2015-09-14 18:47:07 -07:00
Kevin Bouwkamp
50fcb3914d Generating metric information dynamically. Makes compatible with postgresql versions < 9.2 2015-09-14 18:47:06 -07:00
Roman Plessl
9a0c0886ce added more UNIT test cases for covering all parts of the code
added debug statement for debugging OpenTSDB communication

Closes #182
2015-09-14 18:43:56 -07:00
Roman Plessl
fc41cc9878 added prefix settings of the module and rearrange go test code 2015-09-14 18:41:43 -07:00
Roman Plessl
08b220a1fb added docker image unit test with OpenTSDB 2015-09-14 18:41:43 -07:00
Roman Plessl
7e3beaf822 fix spaces with gofmt 2015-09-14 18:41:43 -07:00
Roman Plessl
d2150efc19 added readme as suggested / whished in #177 2015-09-14 18:41:43 -07:00
Roman Plessl
380146b75b added opentsdb as sink 2015-09-14 18:41:43 -07:00
Roman Plessl
2bf096cfc7 adds opentsdb telnet output plugin 2015-09-14 18:41:42 -07:00
Roman Plessl
cb887dee81 change/fix expected test result 2015-09-14 18:41:42 -07:00
Roman Plessl
2ee7d5eeb6 code improvements after running tests / compile step 2015-09-14 18:41:42 -07:00
mced
6d6158ff08 [fix] mem_used_perc returns percentage of used mem
Closes #189
2015-09-14 12:18:31 -07:00
Cameron Sparr
11126cf4ae Add a server name tag to the RabbitMQ server list
Fixes #183
2015-09-11 16:46:49 -07:00
Ruslan Islamgaliev
bd00f46d8b Fix docker stats to make it work on centos 7.
issue #58
issue #84
2015-09-11 16:26:08 -07:00
Cameron Sparr
d8482cc286 darwin net plugin fix, really need to godep vendor gopsutil 2015-09-10 13:57:57 -06:00
Cameron Sparr
f7a4317990 Fix multiple redis server bug, do not cache the TCP connections
Fixes #178
2015-09-10 11:51:15 -06:00
Vye Wilson
a55f6498c8 Makefile will now honor GOBIN, if set
Closes #181
2015-09-10 11:50:11 -06:00
Cameron Sparr
81f4aa9a5d Fix bug in setting the precision before gathering metrics
Closes #175
2015-09-09 21:29:55 -06:00
Cameron Sparr
3c7c8926fb Support InfluxDB clusters
Closes #143
2015-09-09 17:57:17 -06:00
Cameron Sparr
a7ed46160a Re-arrange repo files for root dir cleanup 2015-09-09 12:38:51 -06:00
Cameron Sparr
a9b97c7a2b Bump go version number to 1.5 2015-09-09 12:07:58 -06:00
Cameron Sparr
0780ad4ad9 README updates for systemd and deb/rpm install 2015-09-09 12:04:59 -06:00
Cameron Sparr
bf9992b613 Update telegraf.service and packaging script for systemd
Deals with most of #170
2015-09-08 18:23:18 -06:00
Cameron Sparr
8c5e1ff0a0 Update README plugins list 2015-09-04 17:05:50 -06:00
Cameron Sparr
b3044a6e2b Put all ARCH binaries on the README 2015-09-04 16:37:07 -06:00
Cameron Sparr
6260dd1018 Makefile rule for building all linux binaries, and upload all ARCHs 2015-09-04 14:12:50 -06:00
Cameron Sparr
e47801074e package.sh script fixes for uploading binaries 2015-09-04 13:19:13 -06:00
Cameron Sparr
6d42973d7c Update package script and readme for 0.1.8 2015-09-04 12:53:29 -06:00
Cameron Sparr
68e41f130c Ping plugin
Closes #167
2015-09-04 11:20:49 -06:00
Cameron Sparr
65b33a848e Fix default installed config for consistency 2015-09-02 14:25:40 -06:00
Cameron Sparr
5bfb6df0e0 Write data in UTC by default and use 's' precision
Closes #159
Closes #162
2015-09-02 14:19:36 -06:00
Cameron Sparr
13061d1ec7 package.sh: upload raw binaries to S3
Closes #166
2015-09-02 12:05:29 -06:00
nickscript0
0143a4227e add additional metrics to mysql plugin tests
Closes #165
2015-09-02 11:49:16 -06:00
nickscript0
3f63bcde12 add additional MySQL metrics 2015-09-02 11:48:38 -06:00
Michael Wood
b86c6bba4e README: Say when tagpass/tagdrop are valid from.
closes #163
2015-09-02 09:33:05 -06:00
Cameron Sparr
4d19fc0860 Fixup for g->r change, io.reader was already using 'r' 2015-08-31 16:15:38 -06:00
Cameron Sparr
9c57c30e57 Redis plugin internal names consistency fix, g -> r 2015-08-31 15:57:52 -06:00
Cameron Sparr
9969c4e810 Add system uptime metric, string formatted AND in float64
closes #150
2015-08-31 14:43:34 -06:00
Alexander Oleinik
e2bc5d80c9 Apache Plugin
Closes #158
Fixes #132
2015-08-31 10:17:18 -06:00
Michael Desa
ab191e2b58 Rename DEPENDENCY_LICENSES LICENSE_OF_DEPENDENCIES
Closes #155
Closes #154
2015-08-28 19:37:23 -06:00
Michael Desa
d418a6e872 Add list of dependency licenses 2015-08-28 16:17:46 -07:00
Cameron Sparr
bdfd1aef62 Update README with 0.1.7 and make separate CONTRIBUTING file 2015-08-28 10:21:22 -06:00
Cameron Sparr
ff2de0c715 Only build the docker plugin on linux 2015-08-27 17:09:18 -06:00
Cameron Sparr
5b78b1e548 Clean up agent error handling and logging of outputs/plugins
Closes #145
2015-08-27 13:41:19 -06:00
Cameron Sparr
d1f965ae30 Kafka output producer, send telegraf metrics to Kafka brokers
Closes #38
2015-08-26 17:03:58 -06:00
Cameron Sparr
434267898b Indent the toml config for readability 2015-08-26 09:22:03 -06:00
Cameron Sparr
a00510a73c Outputs enhancement to require Description and SampleConfig functions
Closes #142
2015-08-26 07:34:26 -06:00
Cameron Sparr
846fd31121 Improve build from source instructions
Closes #141
2015-08-25 18:18:56 -06:00
Cameron Sparr
ab4344a781 Merge problem, re-enable non-standard DB names 2015-08-25 16:52:16 -06:00
Cameron Sparr
ac97fefb91 makefile: ADVERTISED_HOST needs only be set during docker-compose target 2015-08-25 16:34:30 -06:00
subhachandrachandra
8d034f544c Fixed memory reporting for Linux systems
/proc/meminfo reports memory in KiloBytes and so needs a multiplier of 1024 instead of 1000.
The kernel reports in terms of pages and the proc filesystem is left shifting by 2 for 4KB pages to get KB. Since this is a binary shift, Bytes will need to shift by 10 and so get multiplied by 1024.

From the kernel code. PAGE_SHIFT = 12 for 4KB pages
"MemTotal:       %8lu kB\n", K(i.totalram)

Closes #131
2015-08-25 14:18:14 -06:00
subhachandrachandra
ca1d2c7000 Fixed total memory reporting for Darwin systems. hw.memsize is reported as bytes instead of pages. 2015-08-25 14:16:18 -06:00
Bruno Bigras
0acf15c025 Typo: prec -> perc
Closes #140
2015-08-25 14:15:12 -06:00
Cameron Sparr
94eed9b43c Add MySQL server address tag to all measurements
Closes #138
2015-08-25 13:58:55 -06:00
Bruno Bigras
8a6665c03f memcached: fix when a value contains a space
Fixes #137
Closes #139
2015-08-25 13:14:40 -06:00
Cameron Sparr
85ae6fffbb Vagrantfile: do a one-way rsync so that binaries don't get shared between VMs and host 2015-08-25 11:54:12 -06:00
Cameron Sparr
bd85a36cb1 Fixes #130, document mysql plugin better, README 2015-08-24 15:08:16 -06:00
Cameron Sparr
a449e4b47c Add #136 to CHANGELOG 2015-08-24 14:56:50 -06:00
Cameron Sparr
42602a3f35 Provide a -usage flag for printing the usage of a single plugin
Closes #136
2015-08-24 14:52:46 -06:00
Cameron Sparr
50f902cb02 Fixes #128, add system load and swap back to default Telegraf config 2015-08-24 13:26:21 -06:00
nickscript0
b014ac12ee Update CHANGELOG.md 2015-08-24 13:09:23 -06:00
nickscript0
610f24e0cd Update CHANGELOG.md 2015-08-24 13:09:23 -06:00
nsvarich
f45f7e56fd add plugin.name to error message 2015-08-24 13:09:23 -06:00
nickscript0
afe366d6b7 go fmt remove whitespace 2015-08-24 13:09:23 -06:00
nickscript0
1daa059ef9 Log plugin errors in crankParallel and crankSeparate cases. Previously errors weren't logged in these cases. 2015-08-24 13:09:23 -06:00
Cameron Sparr
9777aa6165 Update README to point to url without 'v' prepended to version 2015-08-24 10:48:21 -06:00
Cameron Sparr
143ec1a019 Filter out the 'v' from the version tag, issue #134 2015-08-24 10:39:15 -06:00
subhachandrachandra
13ee9ff37b Fixed memory reporting for Linux systems
/proc/meminfo reports memory in KiloBytes and so needs a multiplier of 1024 instead of 1000.
The kernel reports in terms of pages and the proc filesystem is left shifting by 2 for 4KB pages to get KB. Since this is a binary shift, Bytes will need to shift by 10 and so get multiplied by 1024.

From the kernel code. PAGE_SHIFT = 12 for 4KB pages
"MemTotal:       %8lu kB\n", K(i.totalram)
2015-08-21 16:08:54 -07:00
subhachandrachandra
a3c846b73e Fixed total memory reporting for Darwin systems. hw.memsize is reported as bytes instead of pages. 2015-08-21 15:15:19 -07:00
Cameron Sparr
3d05575e9d Fix for #129 README typo in the 0.1.6 package name url 2015-08-21 10:43:19 -06:00
Cameron Sparr
9d00b5e165 Version= doesn't work on go1.4.2
fixing makefile & vagrantfile & build script to reflect that
2015-08-20 16:43:25 -06:00
Cameron Sparr
a29b39e17a README typo fix 2015-08-20 15:18:45 -06:00
1534 changed files with 66124 additions and 273356 deletions

.gitattributes vendored (new file, 2 lines changed)

@@ -0,0 +1,2 @@
CHANGELOG.md merge=union

.gitignore vendored (5 lines changed)

@@ -1,3 +1,6 @@
tivan
.vagrant
telegraf
/telegraf
.idea
*~
*#

CHANGELOG.md

@@ -1,76 +1,709 @@
## v0.13 [2016-05-09]
### Release Notes
- **Breaking change** in jolokia plugin. See
https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jolokia/README.md
for updated configuration. The plugin will now support proxy mode and will make
POST requests.
- New [agent] configuration option: `metric_batch_size`. This option tells
telegraf the maximum batch size to allow to accumulate before sending a flush
to the configured outputs. `metric_buffer_limit` now refers to the absolute
maximum number of metrics that will accumulate before metrics are dropped
(see the sketch after these notes).
- There is no longer a `flush_buffer_when_full` option; this is now the default
and only behavior of telegraf.
- **Breaking Change**: docker plugin tags. The cont_id tag no longer exists; it
is now a field called container_id. Additionally, cont_image and
cont_name are being renamed to container_image and container_name.
- **Breaking Change**: docker plugin measurements. The `docker_cpu`, `docker_mem`,
`docker_blkio` and `docker_net` measurements are being renamed to
`docker_container_cpu`, `docker_container_mem`, `docker_container_blkio` and
`docker_container_net`. Why? Because these metrics are
specifically tracking per-container stats. The problem with per-container stats,
in some use-cases, is that if containers are short-lived AND names are not
kept consistent, then the series cardinality will balloon very quickly.
So adding "container" to each metric will:
(1) make it more clear that these metrics are per-container, and
(2) allow users to easily drop per-container metrics if cardinality is an
issue (`namedrop = ["docker_container_*"]`)
- `tagexclude` and `taginclude` are now available, which can be used to remove
tags from measurements on inputs and outputs. See
[the configuration doc](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md)
for more details.
- **Measurement filtering:** All measurement filters now match based on glob
only. Previously there was an undocumented behavior where filters would match
based on _prefix_ in addition to globs. This means that a filter like
`fielddrop = ["time_"]` will need to be changed to `fielddrop = ["time_*"]`
- **datadog**: measurement and field names will no longer have `_` replaced by `.`
- The following plugins have changed their tags to _not_ overwrite the host tag:
- cassandra: `host -> cassandra_host`
- disque: `host -> disque_host`
- rethinkdb: `host -> rethinkdb_host`
- **Breaking Change**: The `win_perf_counters` input has been changed to
sanitize field names, replacing `/Sec` and `/sec` with `_persec`, as well as
spaces with underscores. This is needed because Graphite doesn't like slashes
and spaces, and was failing to accept metrics that had them.
The `/[sS]ec` -> `_persec` is just to make things clearer and uniform.
- **Breaking Change**: snmp plugin. The `host` tag of the snmp plugin has been
changed to the `snmp_host` tag.
- The `disk` input plugin can now be configured with the `HOST_MOUNT_PREFIX` environment variable.
This value is prepended to any mount paths discovered before retrieving stats and is not
included in the reported path. This is necessary for reporting host disk stats when running from within a container.
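As a rough illustration of the buffer and filtering options above, a config
snippet might look like the following (a minimal sketch; the values and the
choice of the docker input are illustrative, not defaults or recommendations):
```
[agent]
  ## maximum number of metrics to send to outputs in one write
  metric_batch_size = 1000
  ## absolute cap on metrics held in memory; beyond this, metrics are dropped
  metric_buffer_limit = 10000

[[inputs.docker]]
  ## strip a per-container tag from every measurement of this input
  tagexclude = ["container_image"]
  ## or drop the per-container measurements entirely if cardinality is an issue
  namedrop = ["docker_container_*"]
```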
### Features
- [#1031](https://github.com/influxdata/telegraf/pull/1031): Jolokia plugin proxy mode. Thanks @saiello!
- [#1017](https://github.com/influxdata/telegraf/pull/1017): taginclude and tagexclude arguments.
- [#1015](https://github.com/influxdata/telegraf/pull/1015): Docker plugin schema refactor.
- [#889](https://github.com/influxdata/telegraf/pull/889): Improved MySQL plugin. Thanks @maksadbek!
- [#1060](https://github.com/influxdata/telegraf/pull/1060): TTL metrics added to MongoDB input plugin
- [#1056](https://github.com/influxdata/telegraf/pull/1056): Don't allow inputs to overwrite host tags.
- [#1035](https://github.com/influxdata/telegraf/issues/1035): Add `user`, `exe`, `pidfile` tags to procstat plugin.
- [#1041](https://github.com/influxdata/telegraf/issues/1041): Add `n_cpus` field to the system plugin.
- [#1072](https://github.com/influxdata/telegraf/pull/1072): New Input Plugin: filestat.
- [#1066](https://github.com/influxdata/telegraf/pull/1066): Replication lag metrics for MongoDB input plugin
- [#1086](https://github.com/influxdata/telegraf/pull/1086): Ability to specify AWS keys in config file. Thanks @johnrengleman!
- [#1096](https://github.com/influxdata/telegraf/pull/1096): Performance refactor of running output buffers.
- [#967](https://github.com/influxdata/telegraf/issues/967): Buffer logging improvements.
- [#1107](https://github.com/influxdata/telegraf/issues/1107): Support lustre2 job stats. Thanks @hanleyja!
- [#1122](https://github.com/influxdata/telegraf/pull/1122): Support setting config path through env variable and default paths.
- [#1128](https://github.com/influxdata/telegraf/pull/1128): MongoDB jumbo chunks metric for MongoDB input plugin
- [#1146](https://github.com/influxdata/telegraf/pull/1146): HAProxy socket support. Thanks weshmashian!
### Bugfixes
- [#1050](https://github.com/influxdata/telegraf/issues/1050): jolokia plugin - do not overwrite host tag. Thanks @saiello!
- [#921](https://github.com/influxdata/telegraf/pull/921): mqtt_consumer stops gathering metrics. Thanks @chaton78!
- [#1013](https://github.com/influxdata/telegraf/pull/1013): Close dead riemann output connections. Thanks @echupriyanov!
- [#1012](https://github.com/influxdata/telegraf/pull/1012): Set default tags in test accumulator.
- [#1024](https://github.com/influxdata/telegraf/issues/1024): Don't replace `.` with `_` in datadog output.
- [#1058](https://github.com/influxdata/telegraf/issues/1058): Fix possible leaky TCP connections in influxdb output.
- [#1044](https://github.com/influxdata/telegraf/pull/1044): Fix SNMP OID possible collisions. Thanks @relip
- [#1022](https://github.com/influxdata/telegraf/issues/1022): Dont error deb/rpm install on systemd errors.
- [#1078](https://github.com/influxdata/telegraf/issues/1078): Use default AWS credential chain.
- [#1070](https://github.com/influxdata/telegraf/issues/1070): SQL Server input. Fix datatype conversion.
- [#1089](https://github.com/influxdata/telegraf/issues/1089): Fix leaky TCP connections in phpfpm plugin.
- [#914](https://github.com/influxdata/telegraf/issues/914): Telegraf can drop metrics on full buffers.
- [#1098](https://github.com/influxdata/telegraf/issues/1098): Sanitize invalid OpenTSDB characters.
- [#1110](https://github.com/influxdata/telegraf/pull/1110): Sanitize * to - in graphite serializer. Thanks @goodeggs!
- [#1118](https://github.com/influxdata/telegraf/pull/1118): Sanitize Counter names for `win_perf_counters` input.
- [#1125](https://github.com/influxdata/telegraf/pull/1125): Wrap all exec command runners with a timeout, so hung os processes don't halt Telegraf.
- [#1113](https://github.com/influxdata/telegraf/pull/1113): Set MaxRetry and RequiredAcks defaults in Kafka output.
- [#1090](https://github.com/influxdata/telegraf/issues/1090): [agent] and [global_tags] config sometimes not getting applied.
- [#1133](https://github.com/influxdata/telegraf/issues/1133): Use a timeout for docker list & stat cmds.
- [#1052](https://github.com/influxdata/telegraf/issues/1052): Docker panic fix when decode fails.
- [#1136](https://github.com/influxdata/telegraf/pull/1136): "DELAYED" Inserts were deprecated in MySQL 5.6.6. Thanks @PierreF
## v0.12.1 [2016-04-14]
### Release Notes
- Breaking change in the dovecot input plugin. See Features section below.
- Graphite output templates are now supported. See
https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
(a sketch follows these notes).
- Possible breaking change for the librato and graphite outputs. Telegraf will
no longer insert field names when the field is simply named `value`. This is
because the `value` field is redundant in the graphite/librato context.
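A minimal sketch of a graphite output using a serializer template, assuming the
`servers`, `prefix` and `template` options described in the linked
DATA_FORMATS_OUTPUT doc (the address and the template pattern are illustrative):
```
[[outputs.graphite]]
  servers = ["localhost:2003"]
  prefix = "telegraf"
  ## assumed template option: controls how host, tags, measurement and field
  ## names are joined into the graphite bucket
  template = "host.tags.measurement.field"
```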
### Features
- [#1009](https://github.com/influxdata/telegraf/pull/1009): Cassandra input plugin. Thanks @subhachandrachandra!
- [#976](https://github.com/influxdata/telegraf/pull/976): Reduce allocations in the UDP and statsd inputs.
- [#979](https://github.com/influxdata/telegraf/pull/979): Reduce allocations in the TCP listener.
- [#992](https://github.com/influxdata/telegraf/pull/992): Refactor allocations in TCP/UDP listeners.
- [#935](https://github.com/influxdata/telegraf/pull/935): AWS Cloudwatch input plugin. Thanks @joshhardy & @ljosa!
- [#943](https://github.com/influxdata/telegraf/pull/943): http_response input plugin. Thanks @Lswith!
- [#939](https://github.com/influxdata/telegraf/pull/939): sysstat input plugin. Thanks @zbindenren!
- [#998](https://github.com/influxdata/telegraf/pull/998): **breaking change** enabled global, user and ip queries in dovecot plugin. Thanks @mikif70!
- [#1001](https://github.com/influxdata/telegraf/pull/1001): Graphite serializer templates.
- [#1008](https://github.com/influxdata/telegraf/pull/1008): Adding memstats metrics to the influxdb plugin.
### Bugfixes
- [#968](https://github.com/influxdata/telegraf/issues/968): Processes plugin gets unknown state when spaces are in (command name)
- [#969](https://github.com/influxdata/telegraf/pull/969): ipmi_sensors: allow : in password. Thanks @awaw!
- [#972](https://github.com/influxdata/telegraf/pull/972): dovecot: remove extra newline in dovecot command. Thanks @mrannanj!
- [#645](https://github.com/influxdata/telegraf/issues/645): docker plugin i/o error on closed pipe. Thanks @tripledes!
## v0.12.0 [2016-04-05]
### Features
- [#951](https://github.com/influxdata/telegraf/pull/951): Parse environment variables in the config file.
- [#948](https://github.com/influxdata/telegraf/pull/948): Cleanup config file and make default package version include all plugins (but commented).
- [#927](https://github.com/influxdata/telegraf/pull/927): Adds parsing of tags to the statsd input when using DataDog's dogstatsd extension
- [#863](https://github.com/influxdata/telegraf/pull/863): AMQP output: allow external auth. Thanks @ekini!
- [#707](https://github.com/influxdata/telegraf/pull/707): Improved prometheus plugin. Thanks @titilambert!
- [#878](https://github.com/influxdata/telegraf/pull/878): Added json serializer. Thanks @ch3lo!
- [#880](https://github.com/influxdata/telegraf/pull/880): Add the ability to specify the bearer token to the prometheus plugin. Thanks @jchauncey!
- [#882](https://github.com/influxdata/telegraf/pull/882): Fixed SQL Server Plugin issues
- [#849](https://github.com/influxdata/telegraf/issues/849): Adding ability to parse single values as an input data type.
- [#844](https://github.com/influxdata/telegraf/pull/844): postgres_extensible plugin added. Thanks @menardorama!
- [#866](https://github.com/influxdata/telegraf/pull/866): couchbase input plugin. Thanks @ljosa!
- [#789](https://github.com/influxdata/telegraf/pull/789): Support multiple field specification and `field*` in graphite templates. Thanks @chrusty!
- [#762](https://github.com/influxdata/telegraf/pull/762): Nagios parser for the exec plugin. Thanks @titilambert!
- [#848](https://github.com/influxdata/telegraf/issues/848): Provide option to omit host tag from telegraf agent.
- [#928](https://github.com/influxdata/telegraf/pull/928): Deprecating the statsd "convert_names" options, expose separator config.
- [#919](https://github.com/influxdata/telegraf/pull/919): ipmi_sensor input plugin. Thanks @ebookbug!
- [#945](https://github.com/influxdata/telegraf/pull/945): KAFKA output: codec, acks, and retry configuration. Thanks @framiere!
### Bugfixes
- [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided.
- [#884](https://github.com/influxdata/telegraf/issues/884): Do not call write method if there are 0 metrics to write.
- [#898](https://github.com/influxdata/telegraf/issues/898): Put database name in quotes, fixes special characters in the database name.
- [#656](https://github.com/influxdata/telegraf/issues/656): No longer run `lsof` on linux to get netstat data, fixes permissions issue.
- [#907](https://github.com/influxdata/telegraf/issues/907): Fix prometheus invalid label/measurement name key.
- [#841](https://github.com/influxdata/telegraf/issues/841): Fix memcached unix socket panic.
- [#873](https://github.com/influxdata/telegraf/issues/873): Fix duplicate measurements in snmp plugin. Thanks @titilambert!
- [#934](https://github.com/influxdata/telegraf/pull/934): phpfpm: Fix fcgi uri path. Thanks @rudenkovk!
- [#805](https://github.com/influxdata/telegraf/issues/805): Kafka consumer stops gathering after i/o timeout.
- [#959](https://github.com/influxdata/telegraf/pull/959): reduce mongodb & prometheus collection timeouts. Thanks @PierreF!
## v0.11.1 [2016-03-17]
### Release Notes
- Primarily this release was cut to fix [#859](https://github.com/influxdata/telegraf/issues/859)
### Features
- [#747](https://github.com/influxdata/telegraf/pull/747): Start telegraf on install & remove on uninstall. Thanks @pierref!
- [#794](https://github.com/influxdata/telegraf/pull/794): Add service reload ability. Thanks @entertainyou!
### Bugfixes
- [#852](https://github.com/influxdata/telegraf/issues/852): Windows zip package fix
- [#859](https://github.com/influxdata/telegraf/issues/859): httpjson plugin panic
## v0.11.0 [2016-03-15]
### Release Notes
### Features
- [#692](https://github.com/influxdata/telegraf/pull/770): Support InfluxDB retention policies
- [#771](https://github.com/influxdata/telegraf/pull/771): Default timeouts for input plugins. Thanks @PierreF!
- [#758](https://github.com/influxdata/telegraf/pull/758): UDP Listener input plugin, thanks @whatyouhide!
- [#769](https://github.com/influxdata/telegraf/issues/769): httpjson plugin: allow specifying SSL configuration.
- [#735](https://github.com/influxdata/telegraf/pull/735): SNMP Table feature. Thanks @titilambert!
- [#754](https://github.com/influxdata/telegraf/pull/754): docker plugin: adding `docker info` metrics to output. Thanks @titilambert!
- [#788](https://github.com/influxdata/telegraf/pull/788): -input-list and -output-list command-line options. Thanks @ebookbug!
- [#778](https://github.com/influxdata/telegraf/pull/778): Adding a TCP input listener.
- [#797](https://github.com/influxdata/telegraf/issues/797): Provide option for persistent MQTT consumer client sessions.
- [#799](https://github.com/influxdata/telegraf/pull/799): Add number of threads for procstat input plugin. Thanks @titilambert!
- [#776](https://github.com/influxdata/telegraf/pull/776): Add Zookeeper chroot option to kafka_consumer. Thanks @prune998!
- [#811](https://github.com/influxdata/telegraf/pull/811): Add processes plugin for classifying total procs on system. Thanks @titilambert!
- [#235](https://github.com/influxdata/telegraf/issues/235): Add number of users to the `system` input plugin.
- [#826](https://github.com/influxdata/telegraf/pull/826): "kernel" linux plugin for /proc/stat metrics (context switches, interrupts, etc.)
- [#847](https://github.com/influxdata/telegraf/pull/847): `ntpq`: Input plugin for running ntp query executable and gathering metrics.
### Bugfixes
- [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":"
- [#722](https://github.com/influxdata/telegraf/pull/722): Librato output plugin fixes. Thanks @chrusty!
- [#745](https://github.com/influxdata/telegraf/issues/745): Fix Telegraf toml parse panic on large config files. Thanks @titilambert!
- [#781](https://github.com/influxdata/telegraf/pull/781): Fix mqtt_consumer username not being set. Thanks @chaton78!
- [#786](https://github.com/influxdata/telegraf/pull/786): Fix mqtt output username not being set. Thanks @msangoi!
- [#773](https://github.com/influxdata/telegraf/issues/773): Fix duplicate measurements in snmp plugin. Thanks @titilambert!
- [#708](https://github.com/influxdata/telegraf/issues/708): packaging: build ARM package
- [#713](https://github.com/influxdata/telegraf/issues/713): packaging: insecure permissions error on log directory
- [#816](https://github.com/influxdata/telegraf/issues/816): Fix phpfpm panic if fcgi endpoint unreachable.
- [#828](https://github.com/influxdata/telegraf/issues/828): fix net_response plugin overwriting host tag.
- [#821](https://github.com/influxdata/telegraf/issues/821): Remove postgres password from server tag. Thanks @menardorama!
## v0.10.4.1
### Release Notes
- Bug in the build script broke deb and rpm packages.
### Bugfixes
- [#750](https://github.com/influxdata/telegraf/issues/750): deb package broken
- [#752](https://github.com/influxdata/telegraf/issues/752): rpm package broken
## v0.10.4 [2016-02-24]
### Release Notes
- The pass/drop parameters have been renamed to fielddrop/fieldpass parameters,
to more accurately indicate their purpose.
- There are also now namedrop/namepass parameters for passing/dropping based
on the metric _name_ (see the sketch after these notes).
- Experimental windows builds now available.
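A minimal sketch of the renamed filter parameters, assuming the cpu input and
illustrative glob patterns:
```
[[inputs.cpu]]
  ## formerly "drop"/"pass": filter on field names
  fielddrop = ["time_*"]
  ## new: filter on the measurement name itself
  namepass = ["cpu*"]
```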
### Features
- [#727](https://github.com/influxdata/telegraf/pull/727): riak input, thanks @jcoene!
- [#694](https://github.com/influxdata/telegraf/pull/694): DNS Query input, thanks @mjasion!
- [#724](https://github.com/influxdata/telegraf/pull/724): username matching for procstat input, thanks @zorel!
- [#736](https://github.com/influxdata/telegraf/pull/736): Ignore dummy filesystems from disk plugin. Thanks @PierreF!
- [#737](https://github.com/influxdata/telegraf/pull/737): Support multiple fields for statsd input. Thanks @mattheath!
### Bugfixes
- [#701](https://github.com/influxdata/telegraf/pull/701): output write count shouldn't print in quiet mode.
- [#746](https://github.com/influxdata/telegraf/pull/746): httpjson plugin: Fix HTTP GET parameters.
## v0.10.3 [2016-02-18]
### Release Notes
- Users of the `exec` and `kafka_consumer` (and the new `nats_consumer`
and `mqtt_consumer` plugins) can now specify the incoming data
format that they would like to parse. Currently supports: "json", "influx", and
"graphite"
- Users of message broker and file output plugins can now choose what data format
they would like to output. Currently supports: "influx" and "graphite"
- More info on parsing _incoming_ data formats can be found
[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md)
- More info on serializing _outgoing_ data formats can be found
[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md)
- Telegraf now has an option `flush_buffer_when_full` that will flush the
metric buffer whenever it fills up for each output, rather than dropping
points and only flushing on a set time interval. This will default to `true`
and is in the `[agent]` config section.
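A minimal sketch of the data format options, assuming an exec input parsing
JSON and a file output serializing graphite (the command, file path and exact
option names here are illustrative assumptions):
```
[[inputs.exec]]
  ## hypothetical script that prints metrics in the chosen format
  command = "/usr/local/bin/my_metrics.sh"
  data_format = "json"

[[outputs.file]]
  files = ["stdout", "/tmp/metrics.out"]
  data_format = "graphite"
```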
### Features
- [#652](https://github.com/influxdata/telegraf/pull/652): CouchDB Input Plugin. Thanks @codehate!
- [#655](https://github.com/influxdata/telegraf/pull/655): Support parsing arbitrary data formats. Currently limited to kafka_consumer and exec inputs.
- [#671](https://github.com/influxdata/telegraf/pull/671): Dovecot input plugin. Thanks @mikif70!
- [#680](https://github.com/influxdata/telegraf/pull/680): NATS consumer input plugin. Thanks @netixen!
- [#676](https://github.com/influxdata/telegraf/pull/676): MQTT consumer input plugin.
- [#683](https://github.com/influxdata/telegraf/pull/683): PostGRES input plugin: add pg_stat_bgwriter. Thanks @menardorama!
- [#679](https://github.com/influxdata/telegraf/pull/679): File/stdout output plugin.
- [#679](https://github.com/influxdata/telegraf/pull/679): Support for arbitrary output data formats.
- [#695](https://github.com/influxdata/telegraf/pull/695): raindrops input plugin. Thanks @burdandrei!
- [#650](https://github.com/influxdata/telegraf/pull/650): net_response input plugin. Thanks @titilambert!
- [#699](https://github.com/influxdata/telegraf/pull/699): Flush based on buffer size rather than time.
- [#682](https://github.com/influxdata/telegraf/pull/682): Mesos input plugin. Thanks @tripledes!
### Bugfixes
- [#443](https://github.com/influxdata/telegraf/issues/443): Fix Ping command timeout parameter on Linux.
- [#662](https://github.com/influxdata/telegraf/pull/667): Change `[tags]` to `[global_tags]` to fix multiple-plugin tags bug.
- [#642](https://github.com/influxdata/telegraf/issues/642): Riemann output plugin issues.
- [#394](https://github.com/influxdata/telegraf/issues/394): Support HTTP POST. Thanks @gabelev!
- [#715](https://github.com/influxdata/telegraf/pull/715): Fix influxdb precision config panic. Thanks @netixen!
## v0.10.2 [2016-02-04]
### Release Notes
- Statsd timing measurements are now aggregated into a single measurement with
fields.
- Graphite output now inserts tags into the bucket in alphabetical order.
- Normalized TLS/SSL support for output plugins: MQTT, AMQP, Kafka
- `verify_ssl` config option was removed from Kafka because it was actually
doing the opposite of what it claimed to do (yikes). It's been replaced by
`insecure_skip_verify` (see the sketch after these notes).
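A minimal sketch of the normalized TLS options on the kafka output (broker
address, topic and certificate paths are placeholders):
```
[[outputs.kafka]]
  brokers = ["localhost:9092"]
  topic = "telegraf"
  ## optional TLS configuration
  ssl_ca = "/etc/telegraf/ca.pem"
  ssl_cert = "/etc/telegraf/cert.pem"
  ssl_key = "/etc/telegraf/key.pem"
  ## replaces the old (inverted) verify_ssl option
  insecure_skip_verify = false
```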
### Features
- [#575](https://github.com/influxdata/telegraf/pull/575): Support for collecting Windows Performance Counters. Thanks @TheFlyingCorpse!
- [#564](https://github.com/influxdata/telegraf/issues/564): features for plugin writing simplification. Internal metric data type.
- [#603](https://github.com/influxdata/telegraf/pull/603): Aggregate statsd timing measurements into fields. Thanks @marcinbunsch!
- [#601](https://github.com/influxdata/telegraf/issues/601): Warn when overwriting cached metrics.
- [#614](https://github.com/influxdata/telegraf/pull/614): PowerDNS input plugin. Thanks @Kasen!
- [#617](https://github.com/influxdata/telegraf/pull/617): exec plugin: parse influx line protocol in addition to JSON.
- [#628](https://github.com/influxdata/telegraf/pull/628): Windows perf counters: pre-vista support
### Bugfixes
- [#595](https://github.com/influxdata/telegraf/issues/595): graphite output should include tags to separate duplicate measurements.
- [#599](https://github.com/influxdata/telegraf/issues/599): datadog plugin tags not working.
- [#600](https://github.com/influxdata/telegraf/issues/600): datadog measurement/field name parsing is wrong.
- [#602](https://github.com/influxdata/telegraf/issues/602): Fix statsd field name templating.
- [#612](https://github.com/influxdata/telegraf/pull/612): Docker input panic fix if stats received are nil.
- [#634](https://github.com/influxdata/telegraf/pull/634): Properly set host headers in httpjson. Thanks @reginaldosousa!
## v0.10.1 [2016-01-27]
### Release Notes
- Telegraf now keeps a fixed-length buffer of metrics per-output. This buffer
defaults to 10,000 metrics, and is adjustable. The buffer is cleared when a
successful write to that output occurs.
- The docker plugin has been significantly overhauled to add more metrics
and allow for docker-machine (incl OSX) support.
[See the readme](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/docker/README.md)
for the latest measurements, fields, and tags. There is also now support for
specifying a docker endpoint to get metrics from (see the sketch after these
notes).
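A minimal sketch of pointing the overhauled docker input at an explicit
endpoint (the socket path and the remote address in the comment are
illustrative):
```
[[inputs.docker]]
  ## local daemon socket; a docker-machine / remote endpoint such as
  ## "tcp://192.168.99.100:2376" could be used instead
  endpoint = "unix:///var/run/docker.sock"
```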
### Features
- [#509](https://github.com/influxdata/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261!
- [#512](https://github.com/influxdata/telegraf/pull/512): Python 3 build script, add lsof dep to package. Thanks @Ormod!
- [#475](https://github.com/influxdata/telegraf/pull/475): Add response time to httpjson plugin. Thanks @titilambert!
- [#519](https://github.com/influxdata/telegraf/pull/519): Added a sensors input based on lm-sensors. Thanks @md14454!
- [#467](https://github.com/influxdata/telegraf/issues/467): Add option to disable statsd measurement name conversion.
- [#534](https://github.com/influxdata/telegraf/pull/534): NSQ input plugin. Thanks @allingeek!
- [#494](https://github.com/influxdata/telegraf/pull/494): Graphite output plugin. Thanks @titilambert!
- AMQP SSL support. Thanks @ekini!
- [#539](https://github.com/influxdata/telegraf/pull/539): Reload config on SIGHUP. Thanks @titilambert!
- [#522](https://github.com/influxdata/telegraf/pull/522): Phusion passenger input plugin. Thanks @kureikain!
- [#541](https://github.com/influxdata/telegraf/pull/541): Kafka output TLS cert support. Thanks @Ormod!
- [#551](https://github.com/influxdata/telegraf/pull/551): Statsd UDP read packet size now defaults to 1500 bytes, and is configurable.
- [#552](https://github.com/influxdata/telegraf/pull/552): Support for collection interval jittering.
- [#484](https://github.com/influxdata/telegraf/issues/484): Include usage percent with procstat metrics.
- [#553](https://github.com/influxdata/telegraf/pull/553): Amazon CloudWatch output. thanks @skwong2!
- [#503](https://github.com/influxdata/telegraf/pull/503): Support docker endpoint configuration.
- [#563](https://github.com/influxdata/telegraf/pull/563): Docker plugin overhaul.
- [#285](https://github.com/influxdata/telegraf/issues/285): Fixed-size buffer of points.
- [#546](https://github.com/influxdata/telegraf/pull/546): SNMP Input plugin. Thanks @titilambert!
- [#589](https://github.com/influxdata/telegraf/pull/589): Microsoft SQL Server input plugin. Thanks @zensqlmonitor!
- [#573](https://github.com/influxdata/telegraf/pull/573): Github webhooks consumer input. Thanks @jackzampolin!
- [#471](https://github.com/influxdata/telegraf/pull/471): httpjson request headers. Thanks @asosso!
### Bugfixes
- [#506](https://github.com/influxdata/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert!
- [#508](https://github.com/influxdata/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin
- [#499](https://github.com/influxdata/telegraf/issues/499) & [#502](https://github.com/influxdata/telegraf/issues/502): php fpm unix socket and other fixes, thanks @kureikain!
- [#543](https://github.com/influxdata/telegraf/issues/543): Statsd Packet size sometimes truncated.
- [#440](https://github.com/influxdata/telegraf/issues/440): Don't query filtered devices for disk stats.
- [#463](https://github.com/influxdata/telegraf/issues/463): Docker plugin not working on AWS Linux
- [#568](https://github.com/influxdata/telegraf/issues/568): Multiple output race condition.
- [#585](https://github.com/influxdata/telegraf/pull/585): Log stack trace and continue on Telegraf panic. Thanks @wutaizeng!
## v0.10.0 [2016-01-12]
### Release Notes
- Linux packages have been taken out of `opt`, the binary is now in `/usr/bin`
and configuration files are in `/etc/telegraf`
- **breaking change** `plugins` have been renamed to `inputs`. This was done because
`plugins` is too generic, as there are now also "output plugins", and will likely
be "aggregator plugins" and "filter plugins" in the future. Additionally,
`inputs/` and `outputs/` directories have been placed in the root-level `plugins/`
directory.
- **breaking change** the `io` plugin has been renamed `diskio`
- **breaking change** plugin measurements aggregated into a single measurement.
- **breaking change** `jolokia` plugin: must use global tag/drop/pass parameters
for configuration.
- **breaking change** `twemproxy` plugin: `prefix` option removed.
- **breaking change** `procstat` cpu measurements are now prepended with `cpu_time_`
instead of only `cpu_`
- **breaking change** some command-line flags have been renamed to separate words.
`-configdirectory` -> `-config-directory`, `-filter` -> `-input-filter`,
`-outputfilter` -> `-output-filter`
- The prometheus plugin schema has not been changed (measurements have not been
aggregated).
### Packaging change note:
RHEL/CentOS users upgrading from 0.2.x to 0.10.0 will probably have their
configurations overwritten by the upgrade. There is a backup stored at
/etc/telegraf/telegraf.conf.$(date +%s).backup.
### Features
- Plugin measurements aggregated into a single measurement.
- Added ability to specify per-plugin tags
- Added ability to specify per-plugin measurement suffix and prefix.
(`name_prefix` and `name_suffix`)
- Added ability to override base plugin measurement name. (`name_override`)
### Bugfixes
## v0.2.5 [unreleased]
### Features
- [#427](https://github.com/influxdata/telegraf/pull/427): zfs plugin: pool stats added. Thanks @allenpetersen!
- [#428](https://github.com/influxdata/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot!
- [#449](https://github.com/influxdata/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff
### Bugfixes
- [#430](https://github.com/influxdata/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham!
- [#452](https://github.com/influxdata/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham!
## v0.2.4 [2015-12-08]
### Features
- [#412](https://github.com/influxdata/telegraf/pull/412): Additional memcached stats. Thanks @mgresser!
- [#410](https://github.com/influxdata/telegraf/pull/410): Additional redis metrics. Thanks @vlaadbrain!
- [#414](https://github.com/influxdata/telegraf/issues/414): Jolokia plugin auth parameters
- [#415](https://github.com/influxdata/telegraf/issues/415): memcached plugin: support unix sockets
- [#418](https://github.com/influxdata/telegraf/pull/418): memcached plugin additional unit tests.
- [#408](https://github.com/influxdata/telegraf/pull/408): MailChimp plugin.
- [#382](https://github.com/influxdata/telegraf/pull/382): Add system wide network protocol stats to `net` plugin.
- [#401](https://github.com/influxdata/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. Thanks @oldmantaiter!
### Bugfixes
- [#405](https://github.com/influxdata/telegraf/issues/405): Prometheus output cardinality issue
- [#388](https://github.com/influxdata/telegraf/issues/388): Fix collection hangup when cpu times decrement.
## v0.2.3 [2015-11-30]
### Release Notes
- **breaking change** The `kafka` plugin has been renamed to `kafka_consumer`.
and most of the config option names have changed.
This only affects the kafka consumer _plugin_ (not the
output). There were a number of problems with the kafka plugin that led to it
only collecting data once at startup, so the kafka plugin was basically
non-functional.
- Plugins can now be specified as a list, and multiple plugin instances of the
same type can be specified, like this:
```
[[inputs.cpu]]
percpu = false
totalcpu = true
[[inputs.cpu]]
percpu = true
totalcpu = false
drop = ["cpu_time"]
```
- Riemann output added
- Aerospike plugin: tag changed from `host` -> `aerospike_host`
### Features
- [#379](https://github.com/influxdata/telegraf/pull/379): Riemann output, thanks @allenj!
- [#375](https://github.com/influxdata/telegraf/pull/375): kafka_consumer service plugin.
- [#392](https://github.com/influxdata/telegraf/pull/392): Procstat plugin can now accept pgrep -f pattern, thanks @ecarreras!
- [#383](https://github.com/influxdata/telegraf/pull/383): Specify plugins as a list.
- [#354](https://github.com/influxdata/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC!
### Bugfixes
- [#371](https://github.com/influxdata/telegraf/issues/371): Kafka consumer plugin not functioning.
- [#389](https://github.com/influxdata/telegraf/issues/389): NaN value panic
## v0.2.2 [2015-11-18]
### Release Notes
- 0.2.1 has a bug where all lists within plugins get duplicated; this includes
lists of servers/URLs. 0.2.2 is being released solely to fix that bug.
### Bugfixes
- [#377](https://github.com/influxdata/telegraf/pull/377): Fix for duplicate slices in inputs.
## v0.2.1 [2015-11-16]
### Release Notes
- Telegraf will no longer use docker-compose for "long" unit test, it has been
changed to just run docker commands in the Makefile. See `make docker-run` and
`make docker-kill`. `make test` will still run all unit tests with docker.
- Long unit tests are now run in CircleCI, with docker & race detector
- Redis plugin tag has changed from `host` to `server`
- HAProxy plugin tag has changed from `host` to `server`
- UDP output now supported
- Telegraf will now compile on FreeBSD
- Users can now specify outputs as lists, specifying multiple outputs of the
same type.
### Features
- [#325](https://github.com/influxdata/telegraf/pull/325): NSQ output. Thanks @jrxFive!
- [#318](https://github.com/influxdata/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter!
- [#338](https://github.com/influxdata/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac!
- [#337](https://github.com/influxdata/telegraf/pull/337): Jolokia plugin, thanks @saiello!
- [#350](https://github.com/influxdata/telegraf/pull/350): Amon output.
- [#365](https://github.com/influxdata/telegraf/pull/365): Twemproxy plugin by @codeb2cc
- [#317](https://github.com/influxdata/telegraf/issues/317): ZFS plugin, thanks @cornerot!
- [#364](https://github.com/influxdata/telegraf/pull/364): Support InfluxDB UDP output.
- [#370](https://github.com/influxdata/telegraf/pull/370): Support specifying multiple outputs, as lists.
- [#372](https://github.com/influxdata/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC!
### Bugfixes
- [#331](https://github.com/influxdata/telegraf/pull/331): Don't overwrite host tag in redis plugin.
- [#336](https://github.com/influxdata/telegraf/pull/336): Mongodb plugin should take 2 measurements.
- [#351](https://github.com/influxdata/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes
- [#360](https://github.com/influxdata/telegraf/pull/360): Apply prefix before ShouldPass check. Thanks @sotfo!
## v0.2.0 [2015-10-27]
### Release Notes
- The -test flag will now only output 2 collections for plugins that need it
- There is a new agent configuration option: `flush_interval`. This option tells
Telegraf how often to flush data to InfluxDB and other output sinks. For example,
users can set `interval = "2s"` and `flush_interval = "60s"` for Telegraf to
collect data every 2 seconds, and flush every 60 seconds.
- `precision` and `utc` are no longer valid agent config values. `precision` has
moved to the `influxdb` output config, where it will continue to default to "s".
- debug and test output will now print the raw line-protocol string
- Telegraf will now, by default, round the collection interval to the nearest
even interval. This means that `interval="10s"` will collect every :00, :10, etc.
To ease scale concerns, flushing will be "jittered" by a random amount so that
all Telegraf instances do not flush at the same time. Both of these options can
be controlled via the `round_interval` and `flush_jitter` config options (see
the sketch after these notes).
- Telegraf will now retry metric flushes twice
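A minimal sketch of the new agent scheduling options and the relocated
precision setting, using the intervals from the example above (the values and
section names reflect the config layout of this era and are an assumption):
```
[agent]
  interval = "2s"
  flush_interval = "60s"
  ## round collection to even interval boundaries (:00, :10, ...)
  round_interval = true
  ## jitter flushes by up to this much so instances don't flush in lockstep
  flush_jitter = "5s"

[outputs.influxdb]
  ## precision moved here from the agent config; still defaults to "s"
  precision = "s"
```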
### Features
- [#205](https://github.com/influxdata/telegraf/issues/205): Include per-db redis keyspace info
- [#226](https://github.com/influxdata/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. Thanks @ekini
- [#90](https://github.com/influxdata/telegraf/issues/90): Add Docker labels to tags in docker plugin
- [#223](https://github.com/influxdata/telegraf/pull/223): Add port tag to nginx plugin. Thanks @neezgee!
- [#227](https://github.com/influxdata/telegraf/pull/227): Add command intervals to exec plugin. Thanks @jpalay!
- [#241](https://github.com/influxdata/telegraf/pull/241): MQTT Output. Thanks @shirou!
- Memory plugin: cached and buffered measurements re-added
- Logging: additional logging for each collection interval, track the number
of metrics collected and from how many inputs.
- [#240](https://github.com/influxdata/telegraf/pull/240): procstat plugin, thanks @ranjib!
- [#244](https://github.com/influxdata/telegraf/pull/244): netstat plugin, thanks @shirou!
- [#262](https://github.com/influxdata/telegraf/pull/262): zookeeper plugin, thanks @jrxFive!
- [#237](https://github.com/influxdata/telegraf/pull/237): statsd service plugin, thanks @sparrc
- [#273](https://github.com/influxdata/telegraf/pull/273): puppet agent plugin, thanks @jrxFive!
- [#280](https://github.com/influxdata/telegraf/issues/280): Use InfluxDB client v2.
- [#281](https://github.com/influxdata/telegraf/issues/281): Eliminate need to deep copy Batch Points.
- [#286](https://github.com/influxdata/telegraf/issues/286): bcache plugin, thanks @cornerot!
- [#287](https://github.com/influxdata/telegraf/issues/287): Batch AMQP output, thanks @ekini!
- [#301](https://github.com/influxdata/telegraf/issues/301): Collect on even intervals
- [#298](https://github.com/influxdata/telegraf/pull/298): Support retrying output writes
- [#300](https://github.com/influxdata/telegraf/issues/300): aerospike plugin. Thanks @oldmantaiter!
- [#322](https://github.com/influxdata/telegraf/issues/322): Librato output. Thanks @jipperinbham!
### Bugfixes
- [#228](https://github.com/influxdata/telegraf/pull/228): New version of package will replace old one. Thanks @ekini!
- [#232](https://github.com/influxdata/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime!
- [#261](https://github.com/influxdata/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini!
- [#245](https://github.com/influxdata/telegraf/issues/245): Document Exec plugin example. Thanks @ekini!
- [#264](https://github.com/influxdata/telegraf/issues/264): logrotate config file fixes. Thanks @linsomniac!
- [#290](https://github.com/influxdata/telegraf/issues/290): Fix some plugins sending their values as strings.
- [#289](https://github.com/influxdata/telegraf/issues/289): Fix accumulator panic on nil tags.
- [#302](https://github.com/influxdata/telegraf/issues/302): Fix `[tags]` getting applied, thanks @gotyaoi!
## v0.1.9 [2015-09-22]
### Release Notes
- InfluxDB output config change: `url` is now `urls`, and is a list. Config files
will still be backwards compatible if only `url` is specified (see the sketch
after these notes).
- The -test flag will now output two metric collections
- Support for filtering telegraf outputs on the CLI -- Telegraf will now
allow filtering of output sinks on the command-line using the `-outputfilter`
flag, much like how the `-filter` flag works for inputs.
- Support for filtering on config-file creation -- Telegraf now supports
filtering to -sample-config command. You can now run
`telegraf -sample-config -filter cpu -outputfilter influxdb` to get a config
file with only the cpu plugin defined, and the influxdb output defined.
- **Breaking Change**: The CPU collection plugin has been refactored to fix some
bugs and outdated dependency issues. At the same time, I also decided to fix
a naming consistency issue, so cpu_percentageIdle will become cpu_usage_idle.
Also, all CPU time measurements now have it indicated in their name, so cpu_idle
will become cpu_time_idle. Additionally, cpu_time measurements are going to be
dropped in the default config.
- **Breaking Change**: The memory plugin has been refactored and some measurements
have been renamed for consistency. Some measurements have also been removed
from the output. They are still being collected by gopsutil, and could easily be
re-added in a "verbose" mode if there is demand for it.
### Features
- [#143](https://github.com/influxdata/telegraf/issues/143): InfluxDB clustering support
- [#181](https://github.com/influxdata/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye!
- [#203](https://github.com/influxdata/telegraf/pull/200): AMQP output. Thanks @ekini!
- [#182](https://github.com/influxdata/telegraf/pull/182): OpenTSDB output. Thanks @rplessl!
- [#187](https://github.com/influxdata/telegraf/pull/187): Retry output sink connections on startup.
- [#220](https://github.com/influxdata/telegraf/pull/220): Add port tag to apache plugin. Thanks @neezgee!
- [#217](https://github.com/influxdata/telegraf/pull/217): Add filtering for output sinks
and filtering when specifying a config file.
### Bugfixes
- [#170](https://github.com/influxdata/telegraf/issues/170): Systemd support
- [#175](https://github.com/influxdata/telegraf/issues/175): Set write precision before gathering metrics
- [#178](https://github.com/influxdata/telegraf/issues/178): redis plugin, multiple server thread hang bug
- Fix net plugin on darwin
- [#84](https://github.com/influxdata/telegraf/issues/84): Fix docker plugin on CentOS. Thanks @neezgee!
- [#189](https://github.com/influxdata/telegraf/pull/189): Fix mem_used_perc. Thanks @mced!
- [#192](https://github.com/influxdata/telegraf/issues/192): Increase compatibility of postgresql plugin. Now supports versions 8.1+
- [#203](https://github.com/influxdata/telegraf/issues/203): EL5 rpm support. Thanks @ekini!
- [#206](https://github.com/influxdata/telegraf/issues/206): CPU steal/guest values wrong on linux.
- [#212](https://github.com/influxdata/telegraf/issues/212): Add hashbang to postinstall script. Thanks @ekini!
- [#212](https://github.com/influxdata/telegraf/issues/212): Fix makefile warning. Thanks @ekini!
## v0.1.8 [2015-09-04]
### Release Notes
- Telegraf will now write data in UTC at second precision by default
- Now using Go 1.5 to build telegraf
### Features
- [#150](https://github.com/influxdata/telegraf/pull/150): Add Host Uptime metric to system plugin
- [#158](https://github.com/influxdata/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4
- [#159](https://github.com/influxdata/telegraf/pull/159): Use second precision for InfluxDB writes
- [#165](https://github.com/influxdata/telegraf/pull/165): Add additional metrics to mysql plugin. Thanks @nickscript0
- [#162](https://github.com/influxdata/telegraf/pull/162): Write UTC by default, provide option
- [#166](https://github.com/influxdata/telegraf/pull/166): Upload binaries to S3
- [#169](https://github.com/influxdata/telegraf/pull/169): Ping plugin
### Bugfixes
## v0.1.7 [2015-08-28]
### Features
- [#38](https://github.com/influxdata/telegraf/pull/38): Kafka output producer.
- [#133](https://github.com/influxdata/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0!
- [#136](https://github.com/influxdata/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin.
- [#137](https://github.com/influxdata/telegraf/issues/137): Memcached: fix when a value contains a space
- [#138](https://github.com/influxdata/telegraf/issues/138): MySQL server address tag.
- [#142](https://github.com/influxdata/telegraf/pull/142): Add Description and SampleConfig funcs to output interface
- Indent the toml config file for readability
### Bugfixes
- [#128](https://github.com/influxdata/telegraf/issues/128): system_load measurement missing.
- [#129](https://github.com/influxdata/telegraf/issues/129): Latest pkg url fix.
- [#131](https://github.com/influxdata/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra!
- [#140](https://github.com/influxdata/telegraf/issues/140): Memory plugin prec->perc typo fix. Thanks @brunoqc!
## v0.1.6 [2015-08-20]
### Features
- [#112](https://github.com/influxdata/telegraf/pull/112): Datadog output. Thanks @jipperinbham!
- [#116](https://github.com/influxdata/telegraf/pull/116): Use godep to vendor all dependencies
- [#120](https://github.com/influxdata/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales!
### Bugfixes
- [#113](https://github.com/influxdata/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility
- [#118](https://github.com/influxdata/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser!
- [#122](https://github.com/influxdata/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser!
- [#126](https://github.com/influxdata/telegraf/issues/126): Nginx plugin not catching net.SplitHostPort error
## v0.1.5 [2015-08-13]
### Features
- [#54](https://github.com/influxdata/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham!
- [#55](https://github.com/influxdata/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar!
- [#71](https://github.com/influxdata/telegraf/pull/71): HAProxy plugin. Thanks @kureikain!
- [#72](https://github.com/influxdata/telegraf/pull/72): Adding TokuDB metrics to MySQL. Thanks @vadimtk!
- [#73](https://github.com/influxdata/telegraf/pull/73): RabbitMQ plugin. Thanks @ianunruh!
- [#77](https://github.com/influxdata/telegraf/issues/77): Automatically create database.
- [#79](https://github.com/influxdata/telegraf/pull/56): Nginx plugin. Thanks @codeb2cc!
- [#86](https://github.com/influxdata/telegraf/pull/86): Lustre2 plugin. Thanks @srfraser!
- [#91](https://github.com/influxdata/telegraf/pull/91): Unit testing
- [#92](https://github.com/influxdata/telegraf/pull/92): Exec plugin. Thanks @alvaromorales!
- [#98](https://github.com/influxdata/telegraf/pull/98): LeoFS plugin. Thanks @mocchira!
- [#103](https://github.com/influxdata/telegraf/pull/103): Filter by metric tags. Thanks @srfraser!
- [#106](https://github.com/influxdata/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet!
- [#107](https://github.com/influxdata/telegraf/pull/107): Multiple outputs beyond influxdb. Thanks @jipperinbham!
- [#108](https://github.com/influxdata/telegraf/issues/108): Support setting per-CPU and total-CPU gathering.
- [#111](https://github.com/influxdata/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay!
### Bugfixes
- [#85](https://github.com/influxdata/telegraf/pull/85): Fix GetLocalHost testutil function for mac users
- [#89](https://github.com/influxdata/telegraf/pull/89): go fmt fixes
- [#94](https://github.com/influxdata/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama
- [#101](https://github.com/influxdata/telegraf/issues/101): switch back from master branch if building locally
- [#99](https://github.com/influxdata/telegraf/issues/99): update integer output to new InfluxDB line protocol format
## v0.1.4 [2015-07-09]
### Features
- [#56](https://github.com/influxdata/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS!
### Bugfixes
- [#50](https://github.com/influxdata/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff!
- [#52](https://github.com/influxdata/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb!
## v0.1.3 [2015-07-05]
### Features
- [#35](https://github.com/influxdata/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS!
- [#47](https://github.com/influxdata/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham!
### Bugfixes
- [#45](https://github.com/influxdata/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz!
- [#43](https://github.com/influxdata/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils!
## v0.1.2 [2015-07-01]
### Features
- [#12](https://github.com/influxdata/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit!
- [#14](https://github.com/influxdata/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to.
- [#16](https://github.com/influxdata/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham!
- [#21](https://github.com/influxdata/telegraf/pull/21): Add memcached plugin. Thanks @Yukki!
### Bugfixes
- [#13](https://github.com/influxdata/telegraf/pull/13): Fix the packaging script.
- [#19](https://github.com/influxdata/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain!
- [#20](https://github.com/influxdata/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros!
- [#23](https://github.com/influxdata/telegraf/pull/23): Change name of folder for packages. Thanks @colinrymer!
- [#32](https://github.com/influxdata/telegraf/pull/32): Fix spelling of memoory -> memory. Thanks @tylernisonoff!
## v0.1.1 [2015-06-19]

CONTRIBUTING.md Normal file

@@ -0,0 +1,300 @@
## Steps for Contributing:
1. [Sign the CLA](http://influxdb.com/community/cla.html)
1. Make changes or write plugin (see below for details)
1. Add your plugin to `plugins/inputs/all/all.go` or `plugins/outputs/all/all.go`
1. If your plugin requires a new Go package,
[add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency)
1. Write a README for your plugin. If it's an input plugin, it should be structured
like the [input example here](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/EXAMPLE_README.md).
Output plugin READMEs are less structured,
but any information you can provide on how the data will look is appreciated.
See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
for a good example.
## GoDoc
Public interfaces for inputs, outputs, metrics, and the accumulator can be found
on the GoDoc
[![GoDoc](https://godoc.org/github.com/influxdata/telegraf?status.svg)](https://godoc.org/github.com/influxdata/telegraf)
## Sign the CLA
Before we can merge a pull request, you will need to sign the CLA,
which can be found [on our website](http://influxdb.com/community/cla.html)
## Adding a dependency
Assuming you can already build the project, run these in the telegraf directory:
1. `go get github.com/sparrc/gdm`
1. `gdm restore`
1. `gdm save`
## Input Plugins
This section is for developers who want to create new collection inputs.
Telegraf is entirely plugin driven. This interface allows for operators to
pick and choose what is gathered and makes it easy for developers
to create new ways of generating metrics.
Plugin authorship is kept as simple as possible to encourage people to develop
and submit new inputs.
### Input Plugin Guidelines
* A plugin must conform to the `telegraf.Input` interface.
* Input Plugins should call `inputs.Add` in their `init` function to register themselves.
See below for a quick example.
* Input Plugins must be added to the
`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
plugin can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this plugin does.
Let's say you've written a plugin that emits metrics about processes on the
current host.
### Input Plugin Example
```go
package simple
// simple.go
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
type Simple struct {
Ok bool
}
func (s *Simple) Description() string {
return "a demo plugin"
}
func (s *Simple) SampleConfig() string {
return "ok = true # indicate if everything is fine"
}
func (s *Simple) Gather(acc telegraf.Accumulator) error {
if s.Ok {
acc.Add("state", "pretty good", nil)
} else {
acc.Add("state", "not great", nil)
}
return nil
}
func init() {
inputs.Add("simple", func() telegraf.Input { return &Simple{} })
}
```
## Input Plugins Accepting Arbitrary Data Formats
Some input plugins (such as
[exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec))
accept arbitrary input data formats. An overview of these data formats can
be found
[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).
To enable this, you must define a `SetParser(parser parsers.Parser)`
method on the plugin object (see the exec plugin for an example) and declare
`parser` as a field of the object.
You can then use the parser internally in your plugin, parsing data as you
see fit. Telegraf's configuration layer will take care of instantiating the
`Parser` object. A sketch of this wiring follows the `Parser` interface below.
You should also add the following to your SampleConfig() return:
```toml
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
```
Below is the `Parser` interface.
```go
// Parser is an interface defining functions that a parser plugin must satisfy.
type Parser interface {
// Parse takes a byte buffer separated by newlines
// ie, `cpu.usage.idle 90\ncpu.usage.busy 10`
// and parses it into telegraf metrics
Parse(buf []byte) ([]telegraf.Metric, error)
// ParseLine takes a single string metric
// ie, "cpu.usage.idle 90"
// and parses it into a telegraf metric.
ParseLine(line string) (telegraf.Metric, error)
}
```
And you can view the code
[here](https://github.com/influxdata/telegraf/blob/henrypfhu-master/plugins/parsers/registry.go).
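For orientation, here is a minimal, hypothetical sketch of how a plugin might wire a parser in. The `Demo` type, its `Data` field, and the call to `acc.AddFields` are illustrative assumptions rather than code from the Telegraf tree; see the exec plugin for the authoritative pattern.
```go
package demo

// demo.go -- hypothetical sketch, not part of the Telegraf source tree.

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/parsers"
)

type Demo struct {
	// Data stands in for whatever raw bytes the plugin collects
	// (command output, file contents, a network payload, ...).
	Data string

	parser parsers.Parser
}

func (d *Demo) Description() string {
	return "a demo plugin that consumes arbitrary data formats"
}

func (d *Demo) SampleConfig() string {
	return `
  ## Data format to consume.
  data_format = "influx"
`
}

// SetParser is called by Telegraf's configuration layer with a parser
// built from the data_format options above.
func (d *Demo) SetParser(parser parsers.Parser) {
	d.parser = parser
}

func (d *Demo) Gather(acc telegraf.Accumulator) error {
	metrics, err := d.parser.Parse([]byte(d.Data))
	if err != nil {
		return err
	}
	// Re-emit the parsed metrics; this assumes the accumulator exposes
	// AddFields(measurement, fields, tags, time), as it did around 0.13.
	for _, m := range metrics {
		acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
	}
	return nil
}

func init() {
	inputs.Add("demo", func() telegraf.Input { return &Demo{} })
}
```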
## Service Input Plugins
This section is for developers who want to create new "service" collection
inputs. A service plugin differs from a regular plugin in that it operates
a background service while Telegraf is running. One example would be the `statsd`
plugin, which operates a statsd server.
Service Input Plugins are substantially more complicated than regular plugins, as
they require threads and locks to ensure data integrity. Service Input Plugins should
be avoided unless there is no way to create their behavior with a regular plugin.
Their interface is quite similar to a regular plugin, with the addition of `Start()`
and `Stop()` methods; a minimal sketch is shown after the guidelines below.
### Service Plugin Guidelines
* Same as the `Plugin` guidelines, except that they must conform to the
`inputs.ServiceInput` interface.
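As a rough illustration only, a service input might be shaped as below. The `MockService` type, its fields, and the placeholder event loop are hypothetical, and the exact `Start`/`Stop` signatures are defined by the `inputs.ServiceInput` interface; see the statsd plugin for a real implementation.
```go
package mockservice

// mockservice.go -- hypothetical sketch, not part of the Telegraf source tree.

import (
	"sync"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

type MockService struct {
	sync.Mutex // guards events, shared between the service goroutine and Gather

	events []string
	done   chan struct{}
}

func (m *MockService) Description() string  { return "a demo service input" }
func (m *MockService) SampleConfig() string { return "" }

// Start launches the background service and must return without blocking.
func (m *MockService) Start() error {
	m.done = make(chan struct{})
	go m.listen() // e.g. bind a socket and accept events in the background
	return nil
}

// Stop tears the background service down and releases its resources.
func (m *MockService) Stop() {
	close(m.done)
}

func (m *MockService) listen() {
	<-m.done // placeholder for a real accept/read loop
}

// Gather reports whatever the background service has collected so far.
func (m *MockService) Gather(acc telegraf.Accumulator) error {
	m.Lock()
	defer m.Unlock()
	acc.Add("events_buffered", len(m.events), nil)
	return nil
}

func init() {
	inputs.Add("mockservice", func() telegraf.Input { return &MockService{} })
}
```
The key constraint is that `Start()` must not block: the long-running work happens in its own goroutine, while `Gather()` only reports what the service has buffered so far.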
## Output Plugins
This section is for developers who want to create a new output sink. Outputs
are created in a similar manner as collection plugins, and their interface has
similar constructs.
### Output Plugin Guidelines
* An output must conform to the `outputs.Output` interface.
* Outputs should call `outputs.Add` in their `init` function to register themselves.
See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
output can be configured. This is included in `telegraf -sample-config`.
* The `Description` function should say in one line what this output does.
### Output Example
```go
package simpleoutput
// simpleoutput.go
import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs"
)
type Simple struct {
Ok bool
}
func (s *Simple) Description() string {
return "a demo output"
}
func (s *Simple) SampleConfig() string {
return "url = localhost"
}
func (s *Simple) Connect() error {
// Make a connection to the URL here
return nil
}
func (s *Simple) Close() error {
// Close connection to the URL here
return nil
}
func (s *Simple) Write(metrics []telegraf.Metric) error {
for _, pt := range metrics {
// write `pt` to the output sink here
}
return nil
}
func init() {
outputs.Add("simpleoutput", func() telegraf.Output { return &Simple{} })
}
```
## Output Plugins Writing Arbitrary Data Formats
Some output plugins (such as
[file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file))
can write arbitrary output data formats. An overview of these data formats can
be found
[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md).
To enable this, you must define a
`SetSerializer(serializer serializers.Serializer)`
method on the plugin object (see the file plugin for an example) and declare
`serializer` as a field of the object.
You can then use the serializer internally in your plugin, serializing data
before it's written. Telegraf's configuration layer will take care of
instantiating the `Serializer` object.
You should also add the following to your SampleConfig() return:
```toml
## Data format to output.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
```
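To make the wiring concrete, here is a hypothetical sketch of a serializer-based output. The `DemoFile` type, its `Path` option, and the assumption that `Serialize` renders a metric as a slice of lines are illustrative; consult the file output plugin and the serializers package for the authoritative pattern.
```go
package demofile

// demofile.go -- hypothetical sketch, not part of the Telegraf source tree.

import (
	"os"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/outputs"
	"github.com/influxdata/telegraf/plugins/serializers"
)

type DemoFile struct {
	Path string // illustrative option: where to append serialized metrics

	serializer serializers.Serializer
	file       *os.File
}

func (f *DemoFile) Description() string {
	return "a demo output that writes arbitrary data formats"
}

func (f *DemoFile) SampleConfig() string {
	return `
  path = "/tmp/metrics.out"

  ## Data format to output.
  data_format = "influx"
`
}

// SetSerializer is called by Telegraf's configuration layer with a serializer
// built from the data_format options above.
func (f *DemoFile) SetSerializer(serializer serializers.Serializer) {
	f.serializer = serializer
}

func (f *DemoFile) Connect() error {
	fh, err := os.OpenFile(f.Path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		return err
	}
	f.file = fh
	return nil
}

func (f *DemoFile) Close() error { return f.file.Close() }

func (f *DemoFile) Write(metrics []telegraf.Metric) error {
	for _, m := range metrics {
		// Assumes Serialize renders a metric as one or more text lines;
		// check the serializers package for the current signature.
		lines, err := f.serializer.Serialize(m)
		if err != nil {
			return err
		}
		for _, line := range lines {
			if _, err := f.file.WriteString(line + "\n"); err != nil {
				return err
			}
		}
	}
	return nil
}

func init() {
	outputs.Add("demofile", func() telegraf.Output { return &DemoFile{} })
}
```
Note that `serializer` is an unexported field: users configure it indirectly through `data_format`, and the configuration layer injects it via `SetSerializer`.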
## Service Output Plugins
This section is for developers who want to create a new "service" output. A
service output differs from a regular output in that it operates a background service
while Telegraf is running. One example would be the `prometheus_client` output,
which operates an HTTP server.
Their interface is quite similar to a regular output, with the addition of `Start()`
and `Stop()` methods; a minimal sketch is shown after the guidelines below.
### Service Output Guidelines
* Same as the `Output` guidelines, except that they must conform to the
`output.ServiceOutput` interface.
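And a hypothetical sketch of a service output, loosely in the spirit of `prometheus_client`: the `DemoServer` type, its `Listen` option, and the placeholder HTTP handling are illustrative assumptions, and the authoritative method set is defined by the service output interface referenced above.
```go
package demoserver

// demoserver.go -- hypothetical sketch, not part of the Telegraf source tree.

import (
	"net"
	"net/http"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/outputs"
)

type DemoServer struct {
	Listen string // illustrative option, e.g. ":9273"

	listener net.Listener
	server   *http.Server
}

func (d *DemoServer) Description() string  { return "a demo service output" }
func (d *DemoServer) SampleConfig() string { return `listen = ":9273"` }

// Connect and Close satisfy the regular Output interface; the long-lived
// HTTP server is managed by Start and Stop instead.
func (d *DemoServer) Connect() error { return nil }
func (d *DemoServer) Close() error   { return nil }

// Start brings up the background HTTP server and must return without blocking.
func (d *DemoServer) Start() error {
	ln, err := net.Listen("tcp", d.Listen)
	if err != nil {
		return err
	}
	d.listener = ln
	d.server = &http.Server{Handler: http.NotFoundHandler()} // placeholder handler
	go d.server.Serve(ln)                                     // serves until Stop closes the listener
	return nil
}

// Stop shuts the background server down.
func (d *DemoServer) Stop() {
	d.listener.Close()
}

func (d *DemoServer) Write(metrics []telegraf.Metric) error {
	// Buffer or expose the metrics for the background server to serve.
	return nil
}

func init() {
	outputs.Add("demoserver", func() telegraf.Output { return &DemoServer{} })
}
```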
## Unit Tests
### Execute short tests
execute `make test-short`
### Execute long tests
Because Telegraf collects metrics from several third-party services, mocking each
service is a difficult task: some of them have complicated protocols
which would take some time to replicate.
To overcome this we use Docker containers to provide a
fast and reproducible environment to test those services which require it.
For other situations
(e.g. https://github.com/influxdata/telegraf/blob/master/plugins/inputs/redis/redis_test.go)
a simple mock will suffice.
To execute Telegraf tests follow these simple steps:
- Install docker following [these](https://docs.docker.com/installation/)
instructions
- execute `make test`
**OSX users**: you will need to install `boot2docker` or `docker-machine`.
The Makefile will assume that you have a `docker-machine` box called `default` to
get the IP address.
### Unit test troubleshooting
Try cleaning up your test environment by executing `make docker-kill` and
re-running the tests.

Godeps Normal file

@@ -0,0 +1,58 @@
github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/couchbase/go-couchbase cb664315a324d87d19c879d9cc67fda6be8c2ac1
github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/docker/engine-api 8924d6900370b4c7e7984be5adc61f50a80d7537
github.com/docker/go-connections f549a9393d05688dff0992ef3efd8bbe6c628aeb
github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/gobwas/glob d877f6352135181470c40c73ebb81aefa22115fa
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
github.com/golang/snappy 427fb6fc07997f43afa32f35e850833760e489a7
github.com/gonuts/go-shellquote e842a11b24c6abfb3dd27af69a17f482e4b483c2
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hpcloud/tail b2940955ab8b26e19d43a43c4da0475dd81bdb56
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
github.com/influxdata/influxdb e094138084855d444195b252314dfee9eae34cab
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/nats b13fc9d12b0b123ebc374e6b808c6228ae4234a3
github.com/nats-io/nuid 4f84f5f3b2786224e336af2e13dba0a0a80b76fa
github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 37d89088411de59a4ef9fc340afa0e89dfcb4ea9
github.com/soniah/gosnmp b1b4f885b12c5dcbd021c5cee1c904110de6db7d
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/wvanbergen/kafka 46f9a1cf3f670edec492029fadded9c2d9e18866
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3
golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886
gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4

Godeps/Godeps.json generated

@@ -1,210 +0,0 @@
{
"ImportPath": "github.com/influxdb/telegraf",
"GoVersion": "go1.4.2",
"Packages": [
"./..."
],
"Deps": [
{
"ImportPath": "github.com/Shopify/sarama",
"Comment": "v1.4.3-45-g5b18996",
"Rev": "5b18996ef1cd555a60562ae4c5d7843ae137e12d"
},
{
"ImportPath": "github.com/Sirupsen/logrus",
"Comment": "v0.8.6-7-g9c060de",
"Rev": "9c060de643590dae45da9d7c26276463bfc46fa0"
},
{
"ImportPath": "github.com/armon/go-metrics",
"Rev": "b2d95e5291cdbc26997d1301a5e467ecbb240e25"
},
{
"ImportPath": "github.com/boltdb/bolt",
"Comment": "v1.0-117-g0f053fa",
"Rev": "0f053fabc06119583d61937a0a06ef0ba0f1b301"
},
{
"ImportPath": "github.com/cenkalti/backoff",
"Rev": "4dc77674aceaabba2c7e3da25d4c823edfb73f99"
},
{
"ImportPath": "github.com/dancannon/gorethink/encoding",
"Comment": "v1.x.x-1-g786f12a",
"Rev": "786f12ae730ea93485c4eb2c44b3ede6e1e8745f"
},
{
"ImportPath": "github.com/dancannon/gorethink/ql2",
"Comment": "v1.x.x-1-g786f12a",
"Rev": "786f12ae730ea93485c4eb2c44b3ede6e1e8745f"
},
{
"ImportPath": "github.com/dancannon/gorethink/types",
"Comment": "v1.x.x-1-g786f12a",
"Rev": "786f12ae730ea93485c4eb2c44b3ede6e1e8745f"
},
{
"ImportPath": "github.com/eapache/go-resiliency/breaker",
"Comment": "v1.0.0-1-ged0319b",
"Rev": "ed0319b32e66e3295db52695ba3ee493e823fbfe"
},
{
"ImportPath": "github.com/eapache/queue",
"Comment": "v1.0.2",
"Rev": "ded5959c0d4e360646dc9e9908cff48666781367"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient",
"Rev": "42d06e2b125654477366c320dcea99107a86e9c2"
},
{
"ImportPath": "github.com/go-sql-driver/mysql",
"Comment": "v1.2-118-g3dd7008",
"Rev": "3dd7008ac1529aca1bcd8a9db75228a71ba23cac"
},
{
"ImportPath": "github.com/gogo/protobuf/proto",
"Rev": "cabd153b69f71bab8b89fd667a2d9bb28c92ceb4"
},
{
"ImportPath": "github.com/golang/protobuf/proto",
"Rev": "73aaaa9eb61d74fbf7e256ca586a3a565b308eea"
},
{
"ImportPath": "github.com/golang/snappy",
"Rev": "723cc1e459b8eea2dea4583200fd60757d40097a"
},
{
"ImportPath": "github.com/gonuts/go-shellquote",
"Rev": "e842a11b24c6abfb3dd27af69a17f482e4b483c2"
},
{
"ImportPath": "github.com/hashicorp/go-msgpack/codec",
"Rev": "fa3f63826f7c23912c15263591e65d54d080b458"
},
{
"ImportPath": "github.com/hashicorp/raft",
"Rev": "9b586e29edf1ed085b11da7772479ee45c433996"
},
{
"ImportPath": "github.com/hashicorp/raft-boltdb",
"Rev": "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee"
},
{
"ImportPath": "github.com/influxdb/influxdb/client",
"Comment": "v0.9.3-rc1",
"Rev": "f4077764b2bb2b03241452d88e9db321c62bb560"
},
{
"ImportPath": "github.com/influxdb/influxdb/influxql",
"Comment": "v0.9.3-rc1",
"Rev": "f4077764b2bb2b03241452d88e9db321c62bb560"
},
{
"ImportPath": "github.com/influxdb/influxdb/meta",
"Comment": "v0.9.3-rc1",
"Rev": "f4077764b2bb2b03241452d88e9db321c62bb560"
},
{
"ImportPath": "github.com/influxdb/influxdb/snapshot",
"Comment": "v0.9.3-rc1",
"Rev": "f4077764b2bb2b03241452d88e9db321c62bb560"
},
{
"ImportPath": "github.com/influxdb/influxdb/toml",
"Comment": "v0.9.3-rc1",
"Rev": "f4077764b2bb2b03241452d88e9db321c62bb560"
},
{
"ImportPath": "github.com/influxdb/influxdb/tsdb",
"Comment": "v0.9.3-rc1",
"Rev": "f4077764b2bb2b03241452d88e9db321c62bb560"
},
{
"ImportPath": "github.com/lib/pq",
"Comment": "go1.0-cutoff-59-gb269bd0",
"Rev": "b269bd035a727d6c1081f76e7a239a1b00674c40"
},
{
"ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
"Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a"
},
{
"ImportPath": "github.com/naoina/go-stringutil",
"Rev": "360db0db4b01d34e12a2ec042c09e7d37fece761"
},
{
"ImportPath": "github.com/naoina/toml",
"Rev": "5811abcabb29d6af0fdf060f96d328962bd3cd5e"
},
{
"ImportPath": "github.com/prometheus/client_golang/extraction",
"Comment": "0.7.0-22-gbbd006b",
"Rev": "bbd006bc5e64ea2c807381d50263be5f230b427d"
},
{
"ImportPath": "github.com/prometheus/client_golang/model",
"Comment": "0.7.0-22-gbbd006b",
"Rev": "bbd006bc5e64ea2c807381d50263be5f230b427d"
},
{
"ImportPath": "github.com/prometheus/client_golang/text",
"Comment": "0.7.0-22-gbbd006b",
"Rev": "bbd006bc5e64ea2c807381d50263be5f230b427d"
},
{
"ImportPath": "github.com/prometheus/client_model/go",
"Comment": "model-0.0.2-12-gfa8ad6f",
"Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6"
},
{
"ImportPath": "github.com/samuel/go-zookeeper/zk",
"Rev": "5bb5cfc093ad18a28148c578f8632cfdb4d802e4"
},
{
"ImportPath": "github.com/stretchr/objx",
"Rev": "cbeaeb16a013161a98496fad62933b1d21786672"
},
{
"ImportPath": "github.com/stretchr/testify/assert",
"Comment": "v1.0-21-gf552045",
"Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1"
},
{
"ImportPath": "github.com/stretchr/testify/mock",
"Comment": "v1.0-21-gf552045",
"Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1"
},
{
"ImportPath": "github.com/stretchr/testify/require",
"Comment": "v1.0-21-gf552045",
"Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1"
},
{
"ImportPath": "github.com/wvanbergen/kafka/consumergroup",
"Rev": "b0e5c20a0d7c3ccfd37a5965ae30a3a0fd15945d"
},
{
"ImportPath": "github.com/wvanbergen/kazoo-go",
"Rev": "02a3868e9b87153285439cd27a39c0a2984a13af"
},
{
"ImportPath": "golang.org/x/crypto/bcrypt",
"Rev": "173ce04bfaf66c7bb0fa9d5c0bfd93e773909dbd"
},
{
"ImportPath": "golang.org/x/crypto/blowfish",
"Rev": "173ce04bfaf66c7bb0fa9d5c0bfd93e773909dbd"
},
{
"ImportPath": "gopkg.in/dancannon/gorethink.v1",
"Comment": "v1.x.x",
"Rev": "8aca6ba2cc6e873299617d730fac0d7f6593113a"
},
{
"ImportPath": "gopkg.in/mgo.v2",
"Comment": "r2015.06.03-3-g3569c88",
"Rev": "3569c88678d88179dcbd68d02ab081cbca3cd4d0"
}
]
}

Godeps/Readme generated

@@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

Godeps/_workspace/.gitignore generated vendored

@@ -1,2 +0,0 @@
/pkg
/bin


@@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
*.test
# Folders
_obj
_test
.vagrant
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe


@@ -1,41 +0,0 @@
language: go
go:
- 1.3.3
- 1.4.2
env:
global:
- KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095
- TOXIPROXY_ADDR=http://localhost:8474
- KAFKA_INSTALL_ROOT=/home/travis/kafka
- KAFKA_HOSTNAME=localhost
- DEBUG=true
matrix:
- KAFKA_VERSION=0.8.1.1
- KAFKA_VERSION=0.8.2.1
before_install:
- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
- vagrant/install_cluster.sh
- vagrant/boot_cluster.sh
- vagrant/create_topics.sh
install:
- make install_dependencies
script:
- make test
- make vet
- make errcheck
- make fmt
matrix:
include:
- go: tip
env: KAFKA_VERSION=0.8.2.1
allow_failures:
- go: tip
fast_finish: true
sudo: false


@@ -1,157 +0,0 @@
# Changelog
#### Version 1.5.0 (unreleased)
New Features:
- TLS-encrypted network connections are now supported. This feature is subject
to change when Kafka releases built-in TLS support, but for now this is
enough to work with TLS-terminating proxies
([#154](https://github.com/Shopify/sarama/pull/154)).
Improvements:
- The consumer will not block if a single partition is not drained by the user;
all other partitions will continue to consume normally
([#485](https://github.com/Shopify/sarama/pull/485)).
- Formatting of error strings has been much improved
([#495](https://github.com/Shopify/sarama/pull/495)).
- Internal refactoring of the producer for code cleanliness and to enable
future work ([#300](https://github.com/Shopify/sarama/pull/300)).
Bug Fixes:
- Fix a potential deadlock in the consumer on shutdown
([#475](https://github.com/Shopify/sarama/pull/475)).
#### Version 1.4.3 (2015-07-21)
Bug Fixes:
- Don't include the partitioner in the producer's "fetch partitions"
circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
- Don't retry messages until the broker is closed when abandoning a broker in
the producer ([#468](https://github.com/Shopify/sarama/pull/468)).
- Update the import path for snappy-go, it has moved again and the API has
changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).
#### Version 1.4.2 (2015-05-27)
Bug Fixes:
- Update the import path for snappy-go, it has moved from google code to github
([#456](https://github.com/Shopify/sarama/pull/456)).
#### Version 1.4.1 (2015-05-25)
Improvements:
- Optimizations when decoding snappy messages, thanks to John Potocny
([#446](https://github.com/Shopify/sarama/pull/446)).
Bug Fixes:
- Fix hypothetical race conditions on producer shutdown
([#450](https://github.com/Shopify/sarama/pull/450),
[#451](https://github.com/Shopify/sarama/pull/451)).
#### Version 1.4.0 (2015-05-01)
New Features:
- The consumer now implements `Topics()` and `Partitions()` methods to enable
users to dynamically choose what topics/partitions to consume without
instantiating a full client
([#431](https://github.com/Shopify/sarama/pull/431)).
- The partition-consumer now exposes the high water mark offset value returned
by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)).
- Added a `kafka-console-consumer` tool capable of handling multiple
partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
([#439](https://github.com/Shopify/sarama/pull/439),
[#442](https://github.com/Shopify/sarama/pull/442)).
Improvements:
- The producer's logging during retry scenarios is more consistent, more
useful, and slightly less verbose
([#429](https://github.com/Shopify/sarama/pull/429)).
- The client now shuffles its initial list of seed brokers in order to prevent
thundering herd on the first broker in the list
([#441](https://github.com/Shopify/sarama/pull/441)).
Bug Fixes:
- The producer now correctly manages its state if retries occur when it is
shutting down, fixing several instances of confusing behaviour and at least
one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
- The consumer now handles messages for different partitions asynchronously,
making it much more resilient to specific user code ordering
([#325](https://github.com/Shopify/sarama/pull/325)).
#### Version 1.3.0 (2015-04-16)
New Features:
- The client now tracks consumer group coordinators using
ConsumerMetadataRequests similar to how it tracks partition leadership using
regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
This adds two methods to the client API:
- `Coordinator(consumerGroup string) (*Broker, error)`
- `RefreshCoordinator(consumerGroup string) error`
Improvements:
- ConsumerMetadataResponses now automatically create a Broker object out of the
ID/address/port combination for the Coordinator; accessing the fields
individually has been deprecated
([#413](https://github.com/Shopify/sarama/pull/413)).
- Much improved handling of `OffsetOutOfRange` errors in the consumer.
Consumers will fail to start if the provided offset is out of range
([#418](https://github.com/Shopify/sarama/pull/418))
and they will automatically shut down if the offset falls out of range
([#424](https://github.com/Shopify/sarama/pull/424)).
- Small performance improvement in encoding and decoding protocol messages
([#427](https://github.com/Shopify/sarama/pull/427)).
Bug Fixes:
- Fix a rare race condition in the client's background metadata refresher if
it happens to be activated while the client is being closed
([#422](https://github.com/Shopify/sarama/pull/422)).
#### Version 1.2.0 (2015-04-07)
Improvements:
- The producer's behaviour when `Flush.Frequency` is set is now more intuitive
([#389](https://github.com/Shopify/sarama/pull/389)).
- The producer is now somewhat more memory-efficient during and after retrying
messages due to an improved queue implementation
([#396](https://github.com/Shopify/sarama/pull/396)).
- The consumer produces much more useful logging output when leadership
changes ([#385](https://github.com/Shopify/sarama/pull/385)).
- The client's `GetOffset` method will now automatically refresh metadata and
retry once in the event of stale information or similar
([#394](https://github.com/Shopify/sarama/pull/394)).
- Broker connections now have support for using TCP keepalives
([#407](https://github.com/Shopify/sarama/issues/407)).
Bug Fixes:
- The OffsetCommitRequest message now correctly implements all three possible
API versions ([#390](https://github.com/Shopify/sarama/pull/390),
[#400](https://github.com/Shopify/sarama/pull/400)).
#### Version 1.1.0 (2015-03-20)
Improvements:
- Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
broken topics don't choke throughput
([#373](https://github.com/Shopify/sarama/pull/373)).
Bug Fixes:
- Fix the producer's internal reference counting in certain unusual scenarios
([#367](https://github.com/Shopify/sarama/pull/367)).
- Fix the consumer's internal reference counting in certain unusual scenarios
([#369](https://github.com/Shopify/sarama/pull/369)).
- Fix a condition where the producer's internal control messages could have
gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
- Fix an issue where invalid partition lists would be cached when asking for
metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).
#### Version 1.0.0 (2015-03-17)
Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
- All the configuration values have been unified in the `Config` struct.
- Much improved test suite.


@@ -1,31 +0,0 @@
# Contributing
Contributions are always welcome, both reporting issues and submitting pull requests!
### Reporting issues
Please make sure to include any potentially useful information in the issue, so we can pinpoint the issue faster without going back and forth.
- What SHA of Sarama are you running? If this is not the latest SHA on the master branch, please try if the problem persists with the latest version.
- You can set `sarama.Logger` to a [log.Logger](http://golang.org/pkg/log/#Logger) instance to capture debug output. Please include it in your issue description.
- Also look at the logs of the Kafka broker you are connected to. If you see anything out of the ordinary, please include it.
Also, please include the following information about your environment, so we can help you faster:
- What version of Kafka are you using?
- What version of Go are you using?
- What are the values of your Producer/Consumer/Client configuration?
### Submitting pull requests
We will gladly accept bug fixes or additions to this library. Please fork this library, commit & push your changes, and open a pull request. Because this library is in production use by many people and applications, we code review all additions. To make the review process go as smoothly as possible, please consider the following.
- If you plan to work on something major, please open an issue to discuss the design first.
- Don't break backwards compatibility. If you really have to, open an issue to discuss this first.
- Make sure to use the `go fmt` command to format your code according to the standards. Even better, set up your editor to do this for you when saving.
- Run [go vet](https://godoc.org/golang.org/x/tools/cmd/vet) to detect any suspicious constructs in your code that could be bugs.
- Explicitly handle all error return values. If you really want to ignore an error value, you can assign it to `_`. You can use [errcheck](https://github.com/kisielk/errcheck) to verify whether you have handled all errors.
- You may also want to run [golint](https://github.com/golang/lint) as well to detect style problems.
- Add tests that cover the changes you made. Make sure to run `go test` with the `-race` argument to test for race conditions.
- Make sure your code is supported by all the Go versions we support. You can rely on [Travis CI](https://travis-ci.org/Shopify/sarama) for testing older Go versions


@@ -1,20 +0,0 @@
Copyright (c) 2013 Evan Huus
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@@ -1,24 +0,0 @@
default: fmt vet errcheck test
test:
go test -v -timeout 60s -race ./...
vet:
go vet ./...
errcheck:
errcheck github.com/Shopify/sarama/...
fmt:
@if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi
install_dependencies: install_errcheck install_go_vet get
install_errcheck:
go get github.com/kisielk/errcheck
install_go_vet:
go get golang.org/x/tools/cmd/vet
get:
go get -t


@@ -1,31 +0,0 @@
sarama
======
[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama)
[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
Sarama is an MIT-licensed Go client library for Apache Kafka 0.8 (and later).
### Getting started
- API documentation and example are available via godoc at https://godoc.org/github.com/Shopify/sarama.
- Mocks for testing are available in the [mocks](./mocks) subpackage.
- The [examples](./examples) directory contains more elaborate example applications.
- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
- There is a google group for Kafka client users and authors at https://groups.google.com/forum/#!forum/kafka-clients
### Compatibility and API stability
Sarama provides a "2 releases + 2 months" compatibility guarantee: we support the two latest releases of Kafka
and Go, and we provide a two month grace period for older releases. This means we currently officially
support Go 1.3 and 1.4, and Kafka 0.8.1 and 0.8.2.
Sarama follows semantic versioning and provides API stability via the gopkg.in service.
You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
A changelog is available [here](CHANGELOG.md).
### Other
* [Sarama wiki](https://github.com/Shopify/sarama/wiki) to get started hacking on sarama itself.
* [Kafka Project Home](https://kafka.apache.org/)
* [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)


@@ -1,22 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
MEMORY = 3072
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.box = "hashicorp/precise64"
config.vm.provision :shell, path: "vagrant/provision.sh"
config.vm.network "private_network", ip: "192.168.100.67"
config.vm.provider "vmware_fusion" do |v|
v.vmx["memsize"] = MEMORY.to_s
end
config.vm.provider "virtualbox" do |v|
v.memory = MEMORY
end
end


@@ -1,924 +0,0 @@
package sarama
import (
"fmt"
"sync"
"time"
"github.com/eapache/go-resiliency/breaker"
"github.com/eapache/queue"
)
// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
// and parses responses for errors. You must read from the Errors() channel or the
// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
// leaks: it will not be garbage-collected automatically when it passes out of
// scope.
type AsyncProducer interface {
// AsyncClose triggers a shutdown of the producer, flushing any messages it may have
// buffered. The shutdown has completed when both the Errors and Successes channels
// have been closed. When calling AsyncClose, you *must* continue to read from those
// channels in order to drain the results of any messages in flight.
AsyncClose()
// Close shuts down the producer and flushes any messages it may have buffered.
// You must call this function before a producer object passes out of scope, as
// it may otherwise leak memory. You must call this before calling Close on the
// underlying client.
Close() error
// Input is the input channel for the user to write messages to that they wish to send.
Input() chan<- *ProducerMessage
// Successes is the success output channel back to the user when AckSuccesses is enabled.
// If Return.Successes is true, you MUST read from this channel or the Producer will deadlock.
// It is suggested that you send and read messages together in a single select statement.
Successes() <-chan *ProducerMessage
// Errors is the error output channel back to the user. You MUST read from this channel
// or the Producer will deadlock when the channel is full. Alternatively, you can set
// Producer.Return.Errors in your config to false, which prevents errors from being returned.
Errors() <-chan *ProducerError
}
type asyncProducer struct {
client Client
conf *Config
ownClient bool
errors chan *ProducerError
input, successes, retries chan *ProducerMessage
inFlight sync.WaitGroup
brokers map[*Broker]chan<- *ProducerMessage
brokerRefs map[chan<- *ProducerMessage]int
brokerLock sync.Mutex
}
// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
client, err := NewClient(addrs, conf)
if err != nil {
return nil, err
}
p, err := NewAsyncProducerFromClient(client)
if err != nil {
return nil, err
}
p.(*asyncProducer).ownClient = true
return p, nil
}
// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this producer.
func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
// Check that we are not dealing with a closed Client before processing any other arguments
if client.Closed() {
return nil, ErrClosedClient
}
p := &asyncProducer{
client: client,
conf: client.Config(),
errors: make(chan *ProducerError),
input: make(chan *ProducerMessage),
successes: make(chan *ProducerMessage),
retries: make(chan *ProducerMessage),
brokers: make(map[*Broker]chan<- *ProducerMessage),
brokerRefs: make(map[chan<- *ProducerMessage]int),
}
// launch our singleton dispatchers
go withRecover(p.dispatcher)
go withRecover(p.retryHandler)
return p, nil
}
type flagSet int8
const (
chaser flagSet = 1 << iota // message is last in a group that failed
shutdown // start the shutdown process
)
// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
type ProducerMessage struct {
Topic string // The Kafka topic for this message.
Key Encoder // The partitioning key for this message. It must implement the Encoder interface. Pre-existing Encoders include StringEncoder and ByteEncoder.
Value Encoder // The actual message to store in Kafka. It must implement the Encoder interface. Pre-existing Encoders include StringEncoder and ByteEncoder.
// These are filled in by the producer as the message is processed
Offset int64 // Offset is the offset of the message stored on the broker. This is only guaranteed to be defined if the message was successfully delivered and RequiredAcks is not NoResponse.
Partition int32 // Partition is the partition that the message was sent to. This is only guaranteed to be defined if the message was successfully delivered.
Metadata interface{} // This field is used to hold arbitrary data you wish to include so it will be available when receiving on the Successes and Errors channels. Sarama completely ignores this field and is only to be used for pass-through data.
retries int
flags flagSet
}
func (m *ProducerMessage) byteSize() int {
size := 26 // the metadata overhead of CRC, flags, etc.
if m.Key != nil {
size += m.Key.Length()
}
if m.Value != nil {
size += m.Value.Length()
}
return size
}
func (m *ProducerMessage) clear() {
m.flags = 0
m.retries = 0
}
// ProducerError is the type of error generated when the producer fails to deliver a message.
// It contains the original ProducerMessage as well as the actual error value.
type ProducerError struct {
Msg *ProducerMessage
Err error
}
func (pe ProducerError) Error() string {
return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
}
// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
// when closing a producer.
type ProducerErrors []*ProducerError
func (pe ProducerErrors) Error() string {
return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
}
func (p *asyncProducer) Errors() <-chan *ProducerError {
return p.errors
}
func (p *asyncProducer) Successes() <-chan *ProducerMessage {
return p.successes
}
func (p *asyncProducer) Input() chan<- *ProducerMessage {
return p.input
}
func (p *asyncProducer) Close() error {
p.AsyncClose()
if p.conf.Producer.Return.Successes {
go withRecover(func() {
for _ = range p.successes {
}
})
}
var errors ProducerErrors
if p.conf.Producer.Return.Errors {
for event := range p.errors {
errors = append(errors, event)
}
}
if len(errors) > 0 {
return errors
}
return nil
}
func (p *asyncProducer) AsyncClose() {
go withRecover(p.shutdown)
}
// singleton
// dispatches messages by topic
func (p *asyncProducer) dispatcher() {
handlers := make(map[string]chan<- *ProducerMessage)
shuttingDown := false
for msg := range p.input {
if msg == nil {
Logger.Println("Something tried to send a nil message, it was ignored.")
continue
}
if msg.flags&shutdown != 0 {
shuttingDown = true
p.inFlight.Done()
continue
} else if msg.retries == 0 {
if shuttingDown {
// we can't just call returnError here because that decrements the wait group,
// which hasn't been incremented yet for this message, and shouldn't be
pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
if p.conf.Producer.Return.Errors {
p.errors <- pErr
} else {
Logger.Println(pErr)
}
continue
}
p.inFlight.Add(1)
}
if (p.conf.Producer.Compression == CompressionNone && msg.Value != nil && msg.Value.Length() > p.conf.Producer.MaxMessageBytes) ||
(msg.byteSize() > p.conf.Producer.MaxMessageBytes) {
p.returnError(msg, ErrMessageSizeTooLarge)
continue
}
handler := handlers[msg.Topic]
if handler == nil {
handler = p.newTopicProducer(msg.Topic)
handlers[msg.Topic] = handler
}
handler <- msg
}
for _, handler := range handlers {
close(handler)
}
}
// one per topic
// partitions messages, then dispatches them by partition
type topicProducer struct {
parent *asyncProducer
topic string
input <-chan *ProducerMessage
breaker *breaker.Breaker
handlers map[int32]chan<- *ProducerMessage
partitioner Partitioner
}
func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
tp := &topicProducer{
parent: p,
topic: topic,
input: input,
breaker: breaker.New(3, 1, 10*time.Second),
handlers: make(map[int32]chan<- *ProducerMessage),
partitioner: p.conf.Producer.Partitioner(topic),
}
go withRecover(tp.dispatch)
return input
}
func (tp *topicProducer) dispatch() {
for msg := range tp.input {
if msg.retries == 0 {
if err := tp.partitionMessage(msg); err != nil {
tp.parent.returnError(msg, err)
continue
}
}
handler := tp.handlers[msg.Partition]
if handler == nil {
handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
tp.handlers[msg.Partition] = handler
}
handler <- msg
}
for _, handler := range tp.handlers {
close(handler)
}
}
func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
var partitions []int32
err := tp.breaker.Run(func() (err error) {
if tp.partitioner.RequiresConsistency() {
partitions, err = tp.parent.client.Partitions(msg.Topic)
} else {
partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
}
return
})
if err != nil {
return err
}
numPartitions := int32(len(partitions))
if numPartitions == 0 {
return ErrLeaderNotAvailable
}
choice, err := tp.partitioner.Partition(msg, numPartitions)
if err != nil {
return err
} else if choice < 0 || choice >= numPartitions {
return ErrInvalidPartition
}
msg.Partition = partitions[choice]
return nil
}
// one per partition per topic
// dispatches messages to the appropriate broker
// also responsible for maintaining message order during retries
type partitionProducer struct {
parent *asyncProducer
topic string
partition int32
input <-chan *ProducerMessage
leader *Broker
breaker *breaker.Breaker
output chan<- *ProducerMessage
// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
// all other messages get buffered in retryState[msg.retries].buf to preserve ordering
// retryState[msg.retries].expectChaser simply tracks whether we've seen a chaser message for a given level (and
// therefore whether our buffer is complete and safe to flush)
highWatermark int
retryState []partitionRetryState
}
type partitionRetryState struct {
buf []*ProducerMessage
expectChaser bool
}
func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
pp := &partitionProducer{
parent: p,
topic: topic,
partition: partition,
input: input,
breaker: breaker.New(3, 1, 10*time.Second),
retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
}
go withRecover(pp.dispatch)
return input
}
func (pp *partitionProducer) dispatch() {
// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
// on the first message
pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
if pp.leader != nil {
pp.output = pp.parent.getBrokerProducer(pp.leader)
}
for msg := range pp.input {
if msg.retries > pp.highWatermark {
// a new, higher, retry level; handle it and then back off
pp.newHighWatermark(msg.retries)
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
} else if pp.highWatermark > 0 {
// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
if msg.retries < pp.highWatermark {
// in fact this message is not even the current retry level, so buffer it for now (unless it's just a chaser)
if msg.flags&chaser == chaser {
pp.retryState[msg.retries].expectChaser = false
pp.parent.inFlight.Done() // this chaser is now handled and will be garbage collected
} else {
pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
}
continue
} else if msg.flags&chaser == chaser {
// this message is of the current retry level (msg.retries == highWatermark) and the chaser flag is set,
// meaning this retry level is done and we can go down (at least) one level and flush that
pp.retryState[pp.highWatermark].expectChaser = false
pp.flushRetryBuffers()
pp.parent.inFlight.Done() // this chaser is now handled and will be garbage collected
continue
}
}
// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
// without breaking any of our ordering guarantees
if pp.output == nil {
if err := pp.updateLeader(); err != nil {
pp.parent.returnError(msg, err)
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
continue
}
Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
}
pp.output <- msg
}
if pp.output != nil {
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
}
}
func (pp *partitionProducer) newHighWatermark(hwm int) {
Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
pp.highWatermark = hwm
// send off a chaser so that we know when everything "in between" has made it
// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
pp.retryState[pp.highWatermark].expectChaser = true
pp.parent.inFlight.Add(1) // we're generating a chaser message; track it so we don't shut down while it's still inflight
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: chaser, retries: pp.highWatermark - 1}
// a new HWM means that our current broker selection is out of date
Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
pp.output = nil
}
func (pp *partitionProducer) flushRetryBuffers() {
Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
for {
pp.highWatermark--
if pp.output == nil {
if err := pp.updateLeader(); err != nil {
pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
goto flushDone
}
Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
}
for _, msg := range pp.retryState[pp.highWatermark].buf {
pp.output <- msg
}
flushDone:
pp.retryState[pp.highWatermark].buf = nil
if pp.retryState[pp.highWatermark].expectChaser {
Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
break
} else if pp.highWatermark == 0 {
Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
break
}
}
}
func (pp *partitionProducer) updateLeader() error {
return pp.breaker.Run(func() (err error) {
if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
return err
}
if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
return err
}
pp.output = pp.parent.getBrokerProducer(pp.leader)
return nil
})
}
// one per broker, constructs both an aggregator and a flusher
func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage {
input := make(chan *ProducerMessage)
bridge := make(chan []*ProducerMessage)
a := &aggregator{
parent: p,
broker: broker,
input: input,
output: bridge,
}
go withRecover(a.run)
f := &flusher{
parent: p,
broker: broker,
input: bridge,
currentRetries: make(map[string]map[int32]error),
}
go withRecover(f.run)
return input
}
// groups messages together into appropriately-sized batches for sending to the broker
// based on https://godoc.org/github.com/eapache/channels#BatchingChannel
type aggregator struct {
parent *asyncProducer
broker *Broker
input <-chan *ProducerMessage
output chan<- []*ProducerMessage
buffer []*ProducerMessage
bufferBytes int
timer <-chan time.Time
}
func (a *aggregator) run() {
var output chan<- []*ProducerMessage
for {
select {
case msg := <-a.input:
if msg == nil {
goto shutdown
}
if a.wouldOverflow(msg) {
Logger.Printf("producer/aggregator/%d maximum request accumulated, forcing blocking flush\n", a.broker.ID())
a.output <- a.buffer
a.reset()
output = nil
}
a.buffer = append(a.buffer, msg)
a.bufferBytes += msg.byteSize()
if a.readyToFlush(msg) {
output = a.output
} else if a.parent.conf.Producer.Flush.Frequency > 0 && a.timer == nil {
a.timer = time.After(a.parent.conf.Producer.Flush.Frequency)
}
case <-a.timer:
output = a.output
case output <- a.buffer:
a.reset()
output = nil
}
}
shutdown:
if len(a.buffer) > 0 {
a.output <- a.buffer
}
close(a.output)
}
func (a *aggregator) wouldOverflow(msg *ProducerMessage) bool {
switch {
// Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
case a.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)):
return true
// Would we overflow the size-limit of a compressed message-batch?
case a.parent.conf.Producer.Compression != CompressionNone && a.bufferBytes+msg.byteSize() >= a.parent.conf.Producer.MaxMessageBytes:
return true
// Would we overflow simply in number of messages?
case a.parent.conf.Producer.Flush.MaxMessages > 0 && len(a.buffer) >= a.parent.conf.Producer.Flush.MaxMessages:
return true
default:
return false
}
}
func (a *aggregator) readyToFlush(msg *ProducerMessage) bool {
switch {
// If all three config values are 0, we always flush as-fast-as-possible
case a.parent.conf.Producer.Flush.Frequency == 0 && a.parent.conf.Producer.Flush.Bytes == 0 && a.parent.conf.Producer.Flush.Messages == 0:
return true
// If the message is a chaser we must flush to maintain the state-machine
case msg.flags&chaser == chaser:
return true
// If we've passed the message trigger-point
case a.parent.conf.Producer.Flush.Messages > 0 && len(a.buffer) >= a.parent.conf.Producer.Flush.Messages:
return true
// If we've passed the byte trigger-point
case a.parent.conf.Producer.Flush.Bytes > 0 && a.bufferBytes >= a.parent.conf.Producer.Flush.Bytes:
return true
default:
return false
}
}
func (a *aggregator) reset() {
a.timer = nil
a.buffer = nil
a.bufferBytes = 0
}
// takes a batch at a time from the aggregator and sends to the broker
type flusher struct {
parent *asyncProducer
broker *Broker
input <-chan []*ProducerMessage
currentRetries map[string]map[int32]error
}
func (f *flusher) run() {
var closing error
Logger.Printf("producer/flusher/%d starting up\n", f.broker.ID())
for batch := range f.input {
if closing != nil {
f.parent.retryMessages(batch, closing)
continue
}
msgSets := f.groupAndFilter(batch)
request := f.parent.buildRequest(msgSets)
if request == nil {
continue
}
response, err := f.broker.Produce(request)
switch err.(type) {
case nil:
break
case PacketEncodingError:
f.parent.returnErrors(batch, err)
continue
default:
Logger.Printf("producer/flusher/%d state change to [closing] because %s\n", f.broker.ID(), err)
f.parent.abandonBrokerConnection(f.broker)
_ = f.broker.Close()
closing = err
f.parent.retryMessages(batch, err)
continue
}
if response == nil {
// this only happens when RequiredAcks is NoResponse, so we have to assume success
f.parent.returnSuccesses(batch)
continue
}
f.parseResponse(msgSets, response)
}
Logger.Printf("producer/flusher/%d shut down\n", f.broker.ID())
}
func (f *flusher) groupAndFilter(batch []*ProducerMessage) map[string]map[int32][]*ProducerMessage {
msgSets := make(map[string]map[int32][]*ProducerMessage)
for i, msg := range batch {
if f.currentRetries[msg.Topic] != nil && f.currentRetries[msg.Topic][msg.Partition] != nil {
// we're currently retrying this partition so we need to filter out this message
f.parent.retryMessages([]*ProducerMessage{msg}, f.currentRetries[msg.Topic][msg.Partition])
batch[i] = nil
if msg.flags&chaser == chaser {
// ...but now we can start processing future messages again
Logger.Printf("producer/flusher/%d state change to [normal] on %s/%d\n",
f.broker.ID(), msg.Topic, msg.Partition)
delete(f.currentRetries[msg.Topic], msg.Partition)
}
continue
}
partitionSet := msgSets[msg.Topic]
if partitionSet == nil {
partitionSet = make(map[int32][]*ProducerMessage)
msgSets[msg.Topic] = partitionSet
}
partitionSet[msg.Partition] = append(partitionSet[msg.Partition], msg)
}
return msgSets
}
func (f *flusher) parseResponse(msgSets map[string]map[int32][]*ProducerMessage, response *ProduceResponse) {
// we iterate through the blocks in the request set, not the response, so that we notice
// if the response is missing a block completely
for topic, partitionSet := range msgSets {
for partition, msgs := range partitionSet {
block := response.GetBlock(topic, partition)
if block == nil {
f.parent.returnErrors(msgs, ErrIncompleteResponse)
continue
}
switch block.Err {
// Success
case ErrNoError:
for i := range msgs {
msgs[i].Offset = block.Offset + int64(i)
}
f.parent.returnSuccesses(msgs)
// Retriable errors
case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable,
ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
Logger.Printf("producer/flusher/%d state change to [retrying] on %s/%d because %v\n",
f.broker.ID(), topic, partition, block.Err)
if f.currentRetries[topic] == nil {
f.currentRetries[topic] = make(map[int32]error)
}
f.currentRetries[topic][partition] = block.Err
f.parent.retryMessages(msgs, block.Err)
// Other non-retriable errors
default:
f.parent.returnErrors(msgs, block.Err)
}
}
}
}
// singleton
// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
func (p *asyncProducer) retryHandler() {
var msg *ProducerMessage
buf := queue.New()
for {
if buf.Length() == 0 {
msg = <-p.retries
} else {
select {
case msg = <-p.retries:
case p.input <- buf.Peek().(*ProducerMessage):
buf.Remove()
continue
}
}
if msg == nil {
return
}
buf.Add(msg)
}
}
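// Editor's note: an illustrative sketch, not original sarama code. It isolates the
// "infinite channel" bridge pattern that retryHandler uses above: an unbounded in-memory
// queue (github.com/eapache/queue) lets the goroutine accept from `in` without ever
// blocking the sender, while draining to `out` whenever the receiver is ready. Shutdown
// here is signalled by closing `in` (retryHandler itself uses a nil message instead), and
// anything still buffered at that point is simply dropped for brevity.
func exampleInfiniteBridge(in <-chan *ProducerMessage, out chan<- *ProducerMessage) {
	buf := queue.New()
	for {
		if buf.Length() == 0 {
			msg, ok := <-in
			if !ok {
				return
			}
			buf.Add(msg)
			continue
		}
		select {
		case msg, ok := <-in:
			if !ok {
				return
			}
			buf.Add(msg)
		case out <- buf.Peek().(*ProducerMessage):
			buf.Remove()
		}
	}
}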
// utility functions
func (p *asyncProducer) shutdown() {
Logger.Println("Producer shutting down.")
p.inFlight.Add(1)
p.input <- &ProducerMessage{flags: shutdown}
p.inFlight.Wait()
if p.ownClient {
err := p.client.Close()
if err != nil {
Logger.Println("producer/shutdown failed to close the embedded client:", err)
}
}
close(p.input)
close(p.retries)
close(p.errors)
close(p.successes)
}
func (p *asyncProducer) buildRequest(batch map[string]map[int32][]*ProducerMessage) *ProduceRequest {
req := &ProduceRequest{RequiredAcks: p.conf.Producer.RequiredAcks, Timeout: int32(p.conf.Producer.Timeout / time.Millisecond)}
empty := true
for topic, partitionSet := range batch {
for partition, msgSet := range partitionSet {
setToSend := new(MessageSet)
setSize := 0
for _, msg := range msgSet {
var keyBytes, valBytes []byte
var err error
if msg.Key != nil {
if keyBytes, err = msg.Key.Encode(); err != nil {
p.returnError(msg, err)
continue
}
}
if msg.Value != nil {
if valBytes, err = msg.Value.Encode(); err != nil {
p.returnError(msg, err)
continue
}
}
if p.conf.Producer.Compression != CompressionNone && setSize+msg.byteSize() > p.conf.Producer.MaxMessageBytes {
// compression causes message-sets to be wrapped as single messages, which have tighter
// size requirements, so we have to respect those limits
valBytes, err := encode(setToSend)
if err != nil {
Logger.Println(err) // if this happens, it's basically our fault.
panic(err)
}
req.AddMessage(topic, partition, &Message{Codec: p.conf.Producer.Compression, Key: nil, Value: valBytes})
setToSend = new(MessageSet)
setSize = 0
}
setSize += msg.byteSize()
setToSend.addMessage(&Message{Codec: CompressionNone, Key: keyBytes, Value: valBytes})
empty = false
}
if p.conf.Producer.Compression == CompressionNone {
req.AddSet(topic, partition, setToSend)
} else {
valBytes, err := encode(setToSend)
if err != nil {
Logger.Println(err) // if this happens, it's basically our fault.
panic(err)
}
req.AddMessage(topic, partition, &Message{Codec: p.conf.Producer.Compression, Key: nil, Value: valBytes})
}
}
}
if empty {
return nil
}
return req
}
func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
msg.clear()
pErr := &ProducerError{Msg: msg, Err: err}
if p.conf.Producer.Return.Errors {
p.errors <- pErr
} else {
Logger.Println(pErr)
}
p.inFlight.Done()
}
func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
for _, msg := range batch {
if msg != nil {
p.returnError(msg, err)
}
}
}
func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
for _, msg := range batch {
if msg == nil {
continue
}
if p.conf.Producer.Return.Successes {
msg.clear()
p.successes <- msg
}
p.inFlight.Done()
}
}
func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
for _, msg := range batch {
if msg == nil {
continue
}
if msg.retries >= p.conf.Producer.Retry.Max {
p.returnError(msg, err)
} else {
msg.retries++
p.retries <- msg
}
}
}
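// Editor's note: a hedged configuration sketch, not part of the original file. It collects
// the knobs retryMessages consults above: Retry.Max bounds how many times a message is
// re-queued before returnError is called, Retry.Backoff is the delay applied between retry
// attempts (the tests in this changeset set it to zero to avoid delays), and Return.Errors
// decides whether failures surface on the Errors() channel or are only logged.
func exampleRetryConfig() *Config {
	config := NewConfig()
	config.Producer.Retry.Max = 3                          // give up after three retries
	config.Producer.Retry.Backoff = 100 * time.Millisecond // wait between attempts
	config.Producer.Return.Errors = true                   // deliver ProducerErrors instead of only logging them
	return config
}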
func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
bp := p.brokers[broker]
if bp == nil {
bp = p.newBrokerProducer(broker)
p.brokers[broker] = bp
p.brokerRefs[bp] = 0
}
p.brokerRefs[bp]++
return bp
}
func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
p.brokerRefs[bp]--
if p.brokerRefs[bp] == 0 {
close(bp)
delete(p.brokerRefs, bp)
if p.brokers[broker] == bp {
delete(p.brokers, broker)
}
}
}
func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
delete(p.brokers, broker)
}


@@ -1,743 +0,0 @@
package sarama
import (
"errors"
"log"
"os"
"os/signal"
"sync"
"testing"
"time"
)
const TestMessage = "ABC THE MESSAGE"
func closeProducer(t *testing.T, p AsyncProducer) {
var wg sync.WaitGroup
p.AsyncClose()
wg.Add(2)
go func() {
for _ = range p.Successes() {
t.Error("Unexpected message on Successes()")
}
wg.Done()
}()
go func() {
for msg := range p.Errors() {
t.Error(msg.Err)
}
wg.Done()
}()
wg.Wait()
}
func expectResults(t *testing.T, p AsyncProducer, successes, errors int) {
for successes > 0 || errors > 0 {
select {
case msg := <-p.Errors():
if msg.Msg.flags != 0 {
t.Error("Message had flags set")
}
errors--
if errors < 0 {
t.Error(msg.Err)
}
case msg := <-p.Successes():
if msg.flags != 0 {
t.Error("Message had flags set")
}
successes--
if successes < 0 {
t.Error("Too many successes")
}
}
}
}
type testPartitioner chan *int32
func (p testPartitioner) Partition(msg *ProducerMessage, numPartitions int32) (int32, error) {
part := <-p
if part == nil {
return 0, errors.New("BOOM")
}
return *part, nil
}
func (p testPartitioner) RequiresConsistency() bool {
return true
}
func (p testPartitioner) feed(partition int32) {
p <- &partition
}
func TestAsyncProducer(t *testing.T) {
seedBroker := newMockBroker(t, 1)
leader := newMockBroker(t, 2)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader.Returns(prodSuccess)
config := NewConfig()
config.Producer.Flush.Messages = 10
config.Producer.Return.Successes = true
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Metadata: i}
}
for i := 0; i < 10; i++ {
select {
case msg := <-producer.Errors():
t.Error(msg.Err)
if msg.Msg.flags != 0 {
t.Error("Message had flags set")
}
case msg := <-producer.Successes():
if msg.flags != 0 {
t.Error("Message had flags set")
}
if msg.Metadata.(int) != i {
t.Error("Message metadata did not match")
}
}
}
closeProducer(t, producer)
leader.Close()
seedBroker.Close()
}
func TestAsyncProducerMultipleFlushes(t *testing.T) {
seedBroker := newMockBroker(t, 1)
leader := newMockBroker(t, 2)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader.Returns(prodSuccess)
leader.Returns(prodSuccess)
leader.Returns(prodSuccess)
config := NewConfig()
config.Producer.Flush.Messages = 5
config.Producer.Return.Successes = true
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
for flush := 0; flush < 3; flush++ {
for i := 0; i < 5; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
expectResults(t, producer, 5, 0)
}
closeProducer(t, producer)
leader.Close()
seedBroker.Close()
}
func TestAsyncProducerMultipleBrokers(t *testing.T) {
seedBroker := newMockBroker(t, 1)
leader0 := newMockBroker(t, 2)
leader1 := newMockBroker(t, 3)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leader0.Addr(), leader0.BrokerID())
metadataResponse.AddBroker(leader1.Addr(), leader1.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader0.BrokerID(), nil, nil, ErrNoError)
metadataResponse.AddTopicPartition("my_topic", 1, leader1.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
prodResponse0 := new(ProduceResponse)
prodResponse0.AddTopicPartition("my_topic", 0, ErrNoError)
leader0.Returns(prodResponse0)
prodResponse1 := new(ProduceResponse)
prodResponse1.AddTopicPartition("my_topic", 1, ErrNoError)
leader1.Returns(prodResponse1)
config := NewConfig()
config.Producer.Flush.Messages = 5
config.Producer.Return.Successes = true
config.Producer.Partitioner = NewRoundRobinPartitioner
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
expectResults(t, producer, 10, 0)
closeProducer(t, producer)
leader1.Close()
leader0.Close()
seedBroker.Close()
}
func TestAsyncProducerCustomPartitioner(t *testing.T) {
seedBroker := newMockBroker(t, 1)
leader := newMockBroker(t, 2)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
prodResponse := new(ProduceResponse)
prodResponse.AddTopicPartition("my_topic", 0, ErrNoError)
leader.Returns(prodResponse)
config := NewConfig()
config.Producer.Flush.Messages = 2
config.Producer.Return.Successes = true
config.Producer.Partitioner = func(topic string) Partitioner {
p := make(testPartitioner)
go func() {
p.feed(0)
p <- nil
p <- nil
p <- nil
p.feed(0)
}()
return p
}
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 5; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
expectResults(t, producer, 2, 3)
closeProducer(t, producer)
leader.Close()
seedBroker.Close()
}
func TestAsyncProducerFailureRetry(t *testing.T) {
seedBroker := newMockBroker(t, 1)
leader1 := newMockBroker(t, 2)
leader2 := newMockBroker(t, 3)
metadataLeader1 := new(MetadataResponse)
metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID())
metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataLeader1)
config := NewConfig()
config.Producer.Flush.Messages = 10
config.Producer.Return.Successes = true
config.Producer.Retry.Backoff = 0
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
seedBroker.Close()
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
prodNotLeader := new(ProduceResponse)
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
leader1.Returns(prodNotLeader)
metadataLeader2 := new(MetadataResponse)
metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID())
metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError)
leader1.Returns(metadataLeader2)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader2.Returns(prodSuccess)
expectResults(t, producer, 10, 0)
leader1.Close()
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
leader2.Returns(prodSuccess)
expectResults(t, producer, 10, 0)
leader2.Close()
closeProducer(t, producer)
}
// If a Kafka broker becomes unavailable and then comes back into service, the
// producer reconnects to it and continues sending messages.
func TestAsyncProducerBrokerBounce(t *testing.T) {
// Given
seedBroker := newMockBroker(t, 1)
leader := newMockBroker(t, 2)
leaderAddr := leader.Addr()
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leaderAddr, leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
config := NewConfig()
config.Producer.Flush.Messages = 1
config.Producer.Return.Successes = true
config.Producer.Retry.Backoff = 0
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
leader.Returns(prodSuccess)
expectResults(t, producer, 1, 0)
// When: a broker connection gets reset by a broker (network glitch, restart, you name it).
leader.Close() // producer should get EOF
leader = newMockBrokerAddr(t, 2, leaderAddr) // start it up again right away for giggles
seedBroker.Returns(metadataResponse) // tell it to go to broker 2 again
// Then: a produced message goes through the new broker connection.
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
leader.Returns(prodSuccess)
expectResults(t, producer, 1, 0)
closeProducer(t, producer)
seedBroker.Close()
leader.Close()
}
func TestAsyncProducerBrokerBounceWithStaleMetadata(t *testing.T) {
seedBroker := newMockBroker(t, 1)
leader1 := newMockBroker(t, 2)
leader2 := newMockBroker(t, 3)
metadataLeader1 := new(MetadataResponse)
metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID())
metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataLeader1)
config := NewConfig()
config.Producer.Flush.Messages = 10
config.Producer.Return.Successes = true
config.Producer.Retry.Max = 3
config.Producer.Retry.Backoff = 0
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
leader1.Close() // producer should get EOF
seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down
seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down
// ok fine, tell it to go to leader2 finally
metadataLeader2 := new(MetadataResponse)
metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID())
metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataLeader2)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader2.Returns(prodSuccess)
expectResults(t, producer, 10, 0)
seedBroker.Close()
leader2.Close()
closeProducer(t, producer)
}
func TestAsyncProducerMultipleRetries(t *testing.T) {
seedBroker := newMockBroker(t, 1)
leader1 := newMockBroker(t, 2)
leader2 := newMockBroker(t, 3)
metadataLeader1 := new(MetadataResponse)
metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID())
metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataLeader1)
config := NewConfig()
config.Producer.Flush.Messages = 10
config.Producer.Return.Successes = true
config.Producer.Retry.Max = 4
config.Producer.Retry.Backoff = 0
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
prodNotLeader := new(ProduceResponse)
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
leader1.Returns(prodNotLeader)
metadataLeader2 := new(MetadataResponse)
metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID())
metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataLeader2)
leader2.Returns(prodNotLeader)
seedBroker.Returns(metadataLeader1)
leader1.Returns(prodNotLeader)
seedBroker.Returns(metadataLeader1)
leader1.Returns(prodNotLeader)
seedBroker.Returns(metadataLeader2)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader2.Returns(prodSuccess)
expectResults(t, producer, 10, 0)
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
leader2.Returns(prodSuccess)
expectResults(t, producer, 10, 0)
seedBroker.Close()
leader1.Close()
leader2.Close()
closeProducer(t, producer)
}
func TestAsyncProducerOutOfRetries(t *testing.T) {
t.Skip("Enable once bug #294 is fixed.")
seedBroker := newMockBroker(t, 1)
leader := newMockBroker(t, 2)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
config := NewConfig()
config.Producer.Flush.Messages = 10
config.Producer.Return.Successes = true
config.Producer.Retry.Backoff = 0
config.Producer.Retry.Max = 0
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
prodNotLeader := new(ProduceResponse)
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
leader.Returns(prodNotLeader)
for i := 0; i < 10; i++ {
select {
case msg := <-producer.Errors():
if msg.Err != ErrNotLeaderForPartition {
t.Error(msg.Err)
}
case <-producer.Successes():
t.Error("Unexpected success")
}
}
seedBroker.Returns(metadataResponse)
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader.Returns(prodSuccess)
expectResults(t, producer, 10, 0)
leader.Close()
seedBroker.Close()
safeClose(t, producer)
}
func TestAsyncProducerRetryWithReferenceOpen(t *testing.T) {
seedBroker := newMockBroker(t, 1)
leader := newMockBroker(t, 2)
leaderAddr := leader.Addr()
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leaderAddr, leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
config := NewConfig()
config.Producer.Return.Successes = true
config.Producer.Retry.Backoff = 0
config.Producer.Retry.Max = 1
config.Producer.Partitioner = NewRoundRobinPartitioner
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
// prime partition 0
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader.Returns(prodSuccess)
expectResults(t, producer, 1, 0)
// prime partition 1
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
prodSuccess = new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 1, ErrNoError)
leader.Returns(prodSuccess)
expectResults(t, producer, 1, 0)
// reboot the broker (the producer will get EOF on its existing connection)
leader.Close()
leader = newMockBrokerAddr(t, 2, leaderAddr)
// send another message on partition 0 to trigger the EOF and retry
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
// tell partition 0 to go to that broker again
seedBroker.Returns(metadataResponse)
// succeed this time
prodSuccess = new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader.Returns(prodSuccess)
expectResults(t, producer, 1, 0)
// shutdown
closeProducer(t, producer)
seedBroker.Close()
leader.Close()
}
func TestAsyncProducerFlusherRetryCondition(t *testing.T) {
seedBroker := newMockBroker(t, 1)
leader := newMockBroker(t, 2)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
config := NewConfig()
config.Producer.Flush.Messages = 5
config.Producer.Return.Successes = true
config.Producer.Retry.Backoff = 0
config.Producer.Retry.Max = 1
config.Producer.Partitioner = NewManualPartitioner
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
// prime partitions
for p := int32(0); p < 2; p++ {
for i := 0; i < 5; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: p}
}
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", p, ErrNoError)
leader.Returns(prodSuccess)
expectResults(t, producer, 5, 0)
}
// send more messages on partition 0
for i := 0; i < 5; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0}
}
prodNotLeader := new(ProduceResponse)
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
leader.Returns(prodNotLeader)
// tell partition 0 to go to that broker again
seedBroker.Returns(metadataResponse)
// succeed this time
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader.Returns(prodSuccess)
expectResults(t, producer, 5, 0)
// put five more through
for i := 0; i < 5; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0}
}
leader.Returns(prodSuccess)
expectResults(t, producer, 5, 0)
// shutdown
closeProducer(t, producer)
seedBroker.Close()
leader.Close()
}
func TestAsyncProducerRetryShutdown(t *testing.T) {
seedBroker := newMockBroker(t, 1)
leader := newMockBroker(t, 2)
metadataLeader := new(MetadataResponse)
metadataLeader.AddBroker(leader.Addr(), leader.BrokerID())
metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataLeader)
config := NewConfig()
config.Producer.Flush.Messages = 10
config.Producer.Return.Successes = true
config.Producer.Retry.Backoff = 0
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
producer.AsyncClose()
time.Sleep(5 * time.Millisecond) // let the shutdown goroutine kick in
producer.Input() <- &ProducerMessage{Topic: "FOO"}
if err := <-producer.Errors(); err.Err != ErrShuttingDown {
t.Error(err)
}
prodNotLeader := new(ProduceResponse)
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
leader.Returns(prodNotLeader)
seedBroker.Returns(metadataLeader)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader.Returns(prodSuccess)
expectResults(t, producer, 10, 0)
seedBroker.Close()
leader.Close()
// wait for the async-closed producer to shut down fully
for err := range producer.Errors() {
t.Error(err)
}
}
// This example shows how to use the producer while simultaneously
// reading the Errors channel to know about any failures.
func ExampleAsyncProducer_select() {
producer, err := NewAsyncProducer([]string{"localhost:9092"}, nil)
if err != nil {
panic(err)
}
defer func() {
if err := producer.Close(); err != nil {
log.Fatalln(err)
}
}()
// Trap SIGINT to trigger a shutdown.
signals := make(chan os.Signal, 1)
signal.Notify(signals, os.Interrupt)
var enqueued, errors int
ProducerLoop:
for {
select {
case producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder("testing 123")}:
enqueued++
case err := <-producer.Errors():
log.Println("Failed to produce message", err)
errors++
case <-signals:
break ProducerLoop
}
}
log.Printf("Enqueued: %d; errors: %d\n", enqueued, errors)
}
// This example shows how to use the producer with separate goroutines
// reading from the Successes and Errors channels. Note that in order
// for the Successes channel to be populated, you have to set
// config.Producer.Return.Successes to true.
func ExampleAsyncProducer_goroutines() {
config := NewConfig()
config.Producer.Return.Successes = true
producer, err := NewAsyncProducer([]string{"localhost:9092"}, config)
if err != nil {
panic(err)
}
// Trap SIGINT to trigger a graceful shutdown.
signals := make(chan os.Signal, 1)
signal.Notify(signals, os.Interrupt)
var (
wg sync.WaitGroup
enqueued, successes, errors int
)
wg.Add(1)
go func() {
defer wg.Done()
for _ = range producer.Successes() {
successes++
}
}()
wg.Add(1)
go func() {
defer wg.Done()
for err := range producer.Errors() {
log.Println(err)
errors++
}
}()
ProducerLoop:
for {
message := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")}
select {
case producer.Input() <- message:
enqueued++
case <-signals:
producer.AsyncClose() // Trigger a shutdown of the producer.
break ProducerLoop
}
}
wg.Wait()
log.Printf("Successfully produced: %d; errors: %d\n", successes, errors)
}


@@ -1,385 +0,0 @@
package sarama
import (
"crypto/tls"
"fmt"
"io"
"net"
"strconv"
"sync"
"sync/atomic"
"time"
)
// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
type Broker struct {
id int32
addr string
conf *Config
correlationID int32
conn net.Conn
connErr error
lock sync.Mutex
opened int32
responses chan responsePromise
done chan bool
}
type responsePromise struct {
correlationID int32
packets chan []byte
errors chan error
}
// NewBroker creates and returns a Broker targeting the given host:port address.
// This does not attempt to actually connect; you have to call Open() for that.
func NewBroker(addr string) *Broker {
return &Broker{id: -1, addr: addr}
}
// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
// waiting for the connection to complete. This means that any subsequent operations on the broker will
// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
// ErrAlreadyConnected. If conf is nil, the result of NewConfig() is used.
func (b *Broker) Open(conf *Config) error {
if conf == nil {
conf = NewConfig()
}
err := conf.Validate()
if err != nil {
return err
}
if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
return ErrAlreadyConnected
}
b.lock.Lock()
if b.conn != nil {
b.lock.Unlock()
Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, ErrAlreadyConnected)
return ErrAlreadyConnected
}
go withRecover(func() {
defer b.lock.Unlock()
dialer := net.Dialer{
Timeout: conf.Net.DialTimeout,
KeepAlive: conf.Net.KeepAlive,
}
if conf.Net.TLS.Enable {
b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
} else {
b.conn, b.connErr = dialer.Dial("tcp", b.addr)
}
if b.connErr != nil {
b.conn = nil
atomic.StoreInt32(&b.opened, 0)
Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
return
}
b.conf = conf
b.done = make(chan bool)
b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)
if b.id >= 0 {
Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
} else {
Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
}
go withRecover(b.responseReceiver)
})
return nil
}
// Connected returns true if the broker is connected and false otherwise. If the broker is not
// connected but it had tried to connect, the error from that connection attempt is also returned.
func (b *Broker) Connected() (bool, error) {
b.lock.Lock()
defer b.lock.Unlock()
return b.conn != nil, b.connErr
}
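// Editor's note: an illustrative sketch (not part of the original file) of the pattern the
// Open documentation above describes: Open returns immediately while the dial happens in the
// background, so following it with Connected() gives the effect of a fully synchronous open.
func exampleSynchronousOpen(addr string, conf *Config) (*Broker, error) {
	b := NewBroker(addr)
	if err := b.Open(conf); err != nil {
		// Open only returns configuration problems or ErrAlreadyConnected directly.
		return nil, err
	}
	// Connected blocks on the broker lock until the background dial has finished,
	// then reports the outcome of that attempt.
	ok, err := b.Connected()
	if err != nil {
		return nil, err
	}
	if !ok {
		return nil, ErrNotConnected
	}
	return b, nil
}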
func (b *Broker) Close() error {
b.lock.Lock()
defer b.lock.Unlock()
if b.conn == nil {
return ErrNotConnected
}
close(b.responses)
<-b.done
err := b.conn.Close()
b.conn = nil
b.connErr = nil
b.done = nil
b.responses = nil
atomic.StoreInt32(&b.opened, 0)
if err == nil {
Logger.Printf("Closed connection to broker %s\n", b.addr)
} else {
Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
}
return err
}
// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
func (b *Broker) ID() int32 {
return b.id
}
// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
func (b *Broker) Addr() string {
return b.addr
}
func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
response := new(MetadataResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
response := new(ConsumerMetadataResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
response := new(OffsetResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
var response *ProduceResponse
var err error
if request.RequiredAcks == NoResponse {
err = b.sendAndReceive(request, nil)
} else {
response = new(ProduceResponse)
err = b.sendAndReceive(request, response)
}
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
response := new(FetchResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
response := new(OffsetCommitResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
response := new(OffsetFetchResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) send(rb requestBody, promiseResponse bool) (*responsePromise, error) {
b.lock.Lock()
defer b.lock.Unlock()
if b.conn == nil {
if b.connErr != nil {
return nil, b.connErr
}
return nil, ErrNotConnected
}
req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
buf, err := encode(req)
if err != nil {
return nil, err
}
err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
if err != nil {
return nil, err
}
_, err = b.conn.Write(buf)
if err != nil {
return nil, err
}
b.correlationID++
if !promiseResponse {
return nil, nil
}
promise := responsePromise{req.correlationID, make(chan []byte), make(chan error)}
b.responses <- promise
return &promise, nil
}
func (b *Broker) sendAndReceive(req requestBody, res decoder) error {
promise, err := b.send(req, res != nil)
if err != nil {
return err
}
if promise == nil {
return nil
}
select {
case buf := <-promise.packets:
return decode(buf, res)
case err = <-promise.errors:
return err
}
}
func (b *Broker) decode(pd packetDecoder) (err error) {
b.id, err = pd.getInt32()
if err != nil {
return err
}
host, err := pd.getString()
if err != nil {
return err
}
port, err := pd.getInt32()
if err != nil {
return err
}
b.addr = fmt.Sprint(host, ":", port)
return nil
}
func (b *Broker) encode(pe packetEncoder) (err error) {
host, portstr, err := net.SplitHostPort(b.addr)
if err != nil {
return err
}
port, err := strconv.Atoi(portstr)
if err != nil {
return err
}
pe.putInt32(b.id)
err = pe.putString(host)
if err != nil {
return err
}
pe.putInt32(int32(port))
return nil
}
func (b *Broker) responseReceiver() {
header := make([]byte, 8)
for response := range b.responses {
err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout))
if err != nil {
response.errors <- err
continue
}
_, err = io.ReadFull(b.conn, header)
if err != nil {
response.errors <- err
continue
}
decodedHeader := responseHeader{}
err = decode(header, &decodedHeader)
if err != nil {
response.errors <- err
continue
}
if decodedHeader.correlationID != response.correlationID {
// TODO if decoded ID < cur ID, discard until we catch up
// TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
response.errors <- PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
continue
}
buf := make([]byte, decodedHeader.length-4)
_, err = io.ReadFull(b.conn, buf)
if err != nil {
// XXX: the above ReadFull call inherits the same ReadDeadline set at the top of this loop, so it may
// fail with a timeout error. If this happens, our connection is permanently toast since we will no longer
// be aligned correctly on the stream (we'll be reading garbage Kafka headers from the middle of data).
// Can we/should we fail harder in that case?
response.errors <- err
continue
}
response.packets <- buf
}
close(b.done)
}


@@ -1,177 +0,0 @@
package sarama
import (
"fmt"
"testing"
)
func ExampleBroker() error {
broker := NewBroker("localhost:9092")
err := broker.Open(nil)
if err != nil {
return err
}
request := MetadataRequest{Topics: []string{"myTopic"}}
response, err := broker.GetMetadata(&request)
if err != nil {
_ = broker.Close()
return err
}
fmt.Println("There are", len(response.Topics), "topics active in the cluster.")
return broker.Close()
}
type mockEncoder struct {
bytes []byte
}
func (m mockEncoder) encode(pe packetEncoder) error {
return pe.putRawBytes(m.bytes)
}
func TestBrokerAccessors(t *testing.T) {
broker := NewBroker("abc:123")
if broker.ID() != -1 {
t.Error("New broker didn't have an ID of -1.")
}
if broker.Addr() != "abc:123" {
t.Error("New broker didn't have the correct address")
}
broker.id = 34
if broker.ID() != 34 {
t.Error("Manually setting broker ID did not take effect.")
}
}
func TestSimpleBrokerCommunication(t *testing.T) {
mb := newMockBroker(t, 0)
defer mb.Close()
broker := NewBroker(mb.Addr())
err := broker.Open(nil)
if err != nil {
t.Fatal(err)
}
for _, tt := range brokerTestTable {
mb.Returns(&mockEncoder{tt.response})
}
for _, tt := range brokerTestTable {
tt.runner(t, broker)
}
err = broker.Close()
if err != nil {
t.Error(err)
}
}
// We're not testing encoding/decoding here, so most of the requests/responses will be empty for simplicity's sake
var brokerTestTable = []struct {
response []byte
runner func(*testing.T, *Broker)
}{
{[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := MetadataRequest{}
response, err := broker.GetMetadata(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("Metadata request got no response!")
}
}},
{[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 't', 0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := ConsumerMetadataRequest{}
response, err := broker.GetConsumerMetadata(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("Consumer Metadata request got no response!")
}
}},
{[]byte{},
func(t *testing.T, broker *Broker) {
request := ProduceRequest{}
request.RequiredAcks = NoResponse
response, err := broker.Produce(&request)
if err != nil {
t.Error(err)
}
if response != nil {
t.Error("Produce request with NoResponse got a response!")
}
}},
{[]byte{0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := ProduceRequest{}
request.RequiredAcks = WaitForLocal
response, err := broker.Produce(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("Produce request without NoResponse got no response!")
}
}},
{[]byte{0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := FetchRequest{}
response, err := broker.Fetch(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("Fetch request got no response!")
}
}},
{[]byte{0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := OffsetFetchRequest{}
response, err := broker.FetchOffset(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("OffsetFetch request got no response!")
}
}},
{[]byte{0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := OffsetCommitRequest{}
response, err := broker.CommitOffset(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("OffsetCommit request got no response!")
}
}},
{[]byte{0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := OffsetRequest{}
response, err := broker.GetAvailableOffsets(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("Offset request got no response!")
}
}},
}


@@ -1,727 +0,0 @@
package sarama
import (
"math/rand"
"sort"
"sync"
"time"
)
// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
// automatically when it passes out of scope. A single client can be safely shared by
// multiple concurrent Producers and Consumers.
type Client interface {
// Config returns the Config struct of the client. This struct should not be altered after it
// has been created.
Config() *Config
// Topics returns the set of available topics as retrieved from the cluster metadata.
Topics() ([]string, error)
// Partitions returns the sorted list of all partition IDs for the given topic.
Partitions(topic string) ([]int32, error)
// WritablePartitions returns the sorted list of all writable partition IDs for the given topic,
// where "writable" means "having a valid leader accepting writes".
WritablePartitions(topic string) ([]int32, error)
// Leader returns the broker object that is the leader of the current topic/partition, as
// determined by querying the cluster metadata.
Leader(topic string, partitionID int32) (*Broker, error)
// Replicas returns the set of all replica IDs for the given partition.
Replicas(topic string, partitionID int32) ([]int32, error)
// RefreshMetadata takes a list of topics and queries the cluster to refresh the
// available metadata for those topics. If no topics are provided, it will refresh metadata
// for all topics.
RefreshMetadata(topics ...string) error
// GetOffset queries the cluster to get the most recent available offset at the given
// time on the topic/partition combination. Time should be OffsetOldest for the earliest available
// offset, OffsetNewest for the offset of the message that will be produced next, or a time.
GetOffset(topic string, partitionID int32, time int64) (int64, error)
// Coordinator returns the coordinating broker for a consumer group. It will return a locally cached
// value if it's available. You can call RefreshCoordinator to update the cached value.
// This function only works on Kafka 0.8.2 and higher.
Coordinator(consumerGroup string) (*Broker, error)
// RefreshCoordinator retrieves the coordinator for a consumer group and stores it in local cache.
// This function only works on Kafka 0.8.2 and higher.
RefreshCoordinator(consumerGroup string) error
// Close shuts down all broker connections managed by this client. It is required to call this function before
// a client object passes out of scope, as it will otherwise leak memory. You must close any Producers or Consumers
// using a client before you close the client.
Close() error
// Closed returns true if the client has already had Close called on it
Closed() bool
}
const (
// OffsetNewest stands for the log head offset, i.e. the offset that will be assigned to the next message
// that will be produced to the partition. You can send this to a client's GetOffset method to get this
// offset, or when calling ConsumePartition to start consuming new messages.
OffsetNewest int64 = -1
// OffsetOldest stands for the oldest offset available on the broker for a partition. You can send this
// to a client's GetOffset method to get this offset, or when calling ConsumePartition to start consuming
// from the oldest offset that is still available on the broker.
OffsetOldest int64 = -2
)
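// Editor's note: a usage sketch, not part of the original file, tying together the Client
// methods documented above: construct a client, list a topic's partitions, query the newest
// offset for each with OffsetNewest, and Close the client when done. The broker address and
// topic name are placeholders.
func exampleClientUsage() error {
	client, err := NewClient([]string{"localhost:9092"}, NewConfig())
	if err != nil {
		return err
	}
	defer func() { _ = client.Close() }() // clients are never cleaned up implicitly
	partitions, err := client.Partitions("my_topic")
	if err != nil {
		return err
	}
	for _, partition := range partitions {
		// OffsetNewest asks for the offset that will be assigned to the next produced message.
		offset, err := client.GetOffset("my_topic", partition, OffsetNewest)
		if err != nil {
			return err
		}
		Logger.Printf("my_topic/%d next offset: %d\n", partition, offset)
	}
	return nil
}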
type client struct {
conf *Config
closer, closed chan none // for shutting down background metadata updater
// the broker addresses given to us through the constructor are not guaranteed to be returned in
// the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
// so we store them separately
seedBrokers []*Broker
deadSeeds []*Broker
brokers map[int32]*Broker // maps broker ids to brokers
metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs
// If the number of partitions is large, we can get some churn calling cachedPartitions,
// so the result is cached. It is important to update this value whenever metadata is changed
cachedPartitionsResults map[string][maxPartitionIndex][]int32
lock sync.RWMutex // protects access to the maps that hold cluster state.
}
// NewClient creates a new Client. It connects to one of the given broker addresses
// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
// be retrieved from any of the given broker addresses, the client is not created.
func NewClient(addrs []string, conf *Config) (Client, error) {
Logger.Println("Initializing new client")
if conf == nil {
conf = NewConfig()
}
if err := conf.Validate(); err != nil {
return nil, err
}
if len(addrs) < 1 {
return nil, ConfigurationError("You must provide at least one broker address")
}
client := &client{
conf: conf,
closer: make(chan none),
closed: make(chan none),
brokers: make(map[int32]*Broker),
metadata: make(map[string]map[int32]*PartitionMetadata),
cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
coordinators: make(map[string]int32),
}
random := rand.New(rand.NewSource(time.Now().UnixNano()))
for _, index := range random.Perm(len(addrs)) {
client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
}
// do an initial fetch of all cluster metadata by specifying an empty list of topics
err := client.RefreshMetadata()
switch err {
case nil:
break
case ErrLeaderNotAvailable, ErrReplicaNotAvailable:
// indicates that maybe part of the cluster is down, but is not fatal to creating the client
Logger.Println(err)
default:
close(client.closed) // we haven't started the background updater yet, so we have to do this manually
_ = client.Close()
return nil, err
}
go withRecover(client.backgroundMetadataUpdater)
Logger.Println("Successfully initialized new client")
return client, nil
}
func (client *client) Config() *Config {
return client.conf
}
func (client *client) Close() error {
if client.Closed() {
// Chances are this is being called from a defer() and the error will go unobserved
// so we go ahead and log the event in this case.
Logger.Printf("Close() called on already closed client")
return ErrClosedClient
}
// shutdown and wait for the background thread before we take the lock, to avoid races
close(client.closer)
<-client.closed
client.lock.Lock()
defer client.lock.Unlock()
Logger.Println("Closing Client")
for _, broker := range client.brokers {
safeAsyncClose(broker)
}
for _, broker := range client.seedBrokers {
safeAsyncClose(broker)
}
client.brokers = nil
client.metadata = nil
return nil
}
func (client *client) Closed() bool {
return client.brokers == nil
}
func (client *client) Topics() ([]string, error) {
if client.Closed() {
return nil, ErrClosedClient
}
client.lock.RLock()
defer client.lock.RUnlock()
ret := make([]string, 0, len(client.metadata))
for topic := range client.metadata {
ret = append(ret, topic)
}
return ret, nil
}
func (client *client) Partitions(topic string) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
partitions := client.cachedPartitions(topic, allPartitions)
if len(partitions) == 0 {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
partitions = client.cachedPartitions(topic, allPartitions)
}
if partitions == nil {
return nil, ErrUnknownTopicOrPartition
}
return partitions, nil
}
func (client *client) WritablePartitions(topic string) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
partitions := client.cachedPartitions(topic, writablePartitions)
// len==0 catches when it's nil (no such topic) and the odd case when every single
// partition is undergoing leader election simultaneously. Callers have to be able to handle
// this function returning an empty slice (which is a valid return value) but catching it
// here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers
// a metadata refresh as a nicety so callers can just try again and don't have to manually
// trigger a refresh (otherwise they'd just keep getting a stale cached copy).
if len(partitions) == 0 {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
partitions = client.cachedPartitions(topic, writablePartitions)
}
if partitions == nil {
return nil, ErrUnknownTopicOrPartition
}
return partitions, nil
}
func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
metadata := client.cachedMetadata(topic, partitionID)
if metadata == nil {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
metadata = client.cachedMetadata(topic, partitionID)
}
if metadata == nil {
return nil, ErrUnknownTopicOrPartition
}
if metadata.Err == ErrReplicaNotAvailable {
return nil, metadata.Err
}
return dupeAndSort(metadata.Replicas), nil
}
func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
if client.Closed() {
return nil, ErrClosedClient
}
leader, err := client.cachedLeader(topic, partitionID)
if leader == nil {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
leader, err = client.cachedLeader(topic, partitionID)
}
return leader, err
}
func (client *client) RefreshMetadata(topics ...string) error {
if client.Closed() {
return ErrClosedClient
}
// Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper
// error. This handles the case by returning an error instead of sending it
// off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
for _, topic := range topics {
if len(topic) == 0 {
return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
}
}
return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max)
}
func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
if client.Closed() {
return -1, ErrClosedClient
}
offset, err := client.getOffset(topic, partitionID, time)
if err != nil {
if err := client.RefreshMetadata(topic); err != nil {
return -1, err
}
return client.getOffset(topic, partitionID, time)
}
return offset, err
}
func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
if client.Closed() {
return nil, ErrClosedClient
}
coordinator := client.cachedCoordinator(consumerGroup)
if coordinator == nil {
if err := client.RefreshCoordinator(consumerGroup); err != nil {
return nil, err
}
coordinator = client.cachedCoordinator(consumerGroup)
}
if coordinator == nil {
return nil, ErrConsumerCoordinatorNotAvailable
}
_ = coordinator.Open(client.conf)
return coordinator, nil
}
func (client *client) RefreshCoordinator(consumerGroup string) error {
if client.Closed() {
return ErrClosedClient
}
response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
if err != nil {
return err
}
client.lock.Lock()
defer client.lock.Unlock()
client.registerBroker(response.Coordinator)
client.coordinators[consumerGroup] = response.Coordinator.ID()
return nil
}
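// Editor's note: an illustrative sketch, not original sarama code, showing the intended
// interplay of Coordinator and RefreshCoordinator above: Coordinator serves from the local
// cache (fetching on a miss), so an explicit RefreshCoordinator is only needed when the
// cached broker is suspected to be stale. Requires Kafka 0.8.2+ as noted in the interface
// docs; the group name is supplied by the caller.
func exampleCoordinatorLookup(c Client, group string) (*Broker, error) {
	coordinator, err := c.Coordinator(group) // cached lookup, populated on first use
	if err == nil {
		return coordinator, nil
	}
	// On failure (for example the coordinator moved), refresh the cache and retry once.
	if refreshErr := c.RefreshCoordinator(group); refreshErr != nil {
		return nil, refreshErr
	}
	return c.Coordinator(group)
}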
// private broker management helpers
// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
// in the brokers map. If a broker with the same ID is already registered at a different address,
// the stale connection is closed asynchronously and replaced. You must hold the write lock before calling this function.
func (client *client) registerBroker(broker *Broker) {
if client.brokers[broker.ID()] == nil {
client.brokers[broker.ID()] = broker
Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
} else if broker.Addr() != client.brokers[broker.ID()].Addr() {
safeAsyncClose(client.brokers[broker.ID()])
client.brokers[broker.ID()] = broker
Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
}
}
// deregisterBroker removes a broker from the seedBrokers list, and if it's
// not a seed broker, removes it from the brokers map completely.
func (client *client) deregisterBroker(broker *Broker) {
client.lock.Lock()
defer client.lock.Unlock()
if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
client.deadSeeds = append(client.deadSeeds, broker)
client.seedBrokers = client.seedBrokers[1:]
} else {
// we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
// but we really shouldn't have to; once that loop is made better this case can be
// removed, and the function generally can be renamed from `deregisterBroker` to
// `nextSeedBroker` or something
Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
delete(client.brokers, broker.ID())
}
}
func (client *client) resurrectDeadBrokers() {
client.lock.Lock()
defer client.lock.Unlock()
Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
client.deadSeeds = nil
}
func (client *client) any() *Broker {
client.lock.RLock()
defer client.lock.RUnlock()
if len(client.seedBrokers) > 0 {
_ = client.seedBrokers[0].Open(client.conf)
return client.seedBrokers[0]
}
// not guaranteed to be random *or* deterministic
for _, broker := range client.brokers {
_ = broker.Open(client.conf)
return broker
}
return nil
}
// private caching/lazy metadata helpers
type partitionType int
const (
allPartitions partitionType = iota
writablePartitions
// If you add any more types, update the partition cache in update()
// Ensure this is the last partition type value
maxPartitionIndex
)
func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
client.lock.RLock()
defer client.lock.RUnlock()
partitions := client.metadata[topic]
if partitions != nil {
return partitions[partitionID]
}
return nil
}
func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
client.lock.RLock()
defer client.lock.RUnlock()
partitions, exists := client.cachedPartitionsResults[topic]
if !exists {
return nil
}
return partitions[partitionSet]
}
func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
partitions := client.metadata[topic]
if partitions == nil {
return nil
}
ret := make([]int32, 0, len(partitions))
for _, partition := range partitions {
if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
continue
}
ret = append(ret, partition.ID)
}
sort.Sort(int32Slice(ret))
return ret
}
func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
client.lock.RLock()
defer client.lock.RUnlock()
partitions := client.metadata[topic]
if partitions != nil {
metadata, ok := partitions[partitionID]
if ok {
if metadata.Err == ErrLeaderNotAvailable {
return nil, ErrLeaderNotAvailable
}
b := client.brokers[metadata.Leader]
if b == nil {
return nil, ErrLeaderNotAvailable
}
_ = b.Open(client.conf)
return b, nil
}
}
return nil, ErrUnknownTopicOrPartition
}
func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
broker, err := client.Leader(topic, partitionID)
if err != nil {
return -1, err
}
request := &OffsetRequest{}
request.AddBlock(topic, partitionID, time, 1)
response, err := broker.GetAvailableOffsets(request)
if err != nil {
_ = broker.Close()
return -1, err
}
block := response.GetBlock(topic, partitionID)
if block == nil {
_ = broker.Close()
return -1, ErrIncompleteResponse
}
if block.Err != ErrNoError {
return -1, block.Err
}
if len(block.Offsets) != 1 {
return -1, ErrOffsetOutOfRange
}
return block.Offsets[0], nil
}
// core metadata update logic
func (client *client) backgroundMetadataUpdater() {
defer close(client.closed)
if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
return
}
ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if err := client.RefreshMetadata(); err != nil {
Logger.Println("Client background metadata update:", err)
}
case <-client.closer:
return
}
}
}
func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error {
retry := func(err error) error {
if attemptsRemaining > 0 {
Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
time.Sleep(client.conf.Metadata.Retry.Backoff)
return client.tryRefreshMetadata(topics, attemptsRemaining-1)
}
return err
}
for broker := client.any(); broker != nil; broker = client.any() {
if len(topics) > 0 {
Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
} else {
Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
}
response, err := broker.GetMetadata(&MetadataRequest{Topics: topics})
switch err.(type) {
case nil:
// valid response, use it
if shouldRetry, err := client.updateMetadata(response); shouldRetry {
Logger.Println("client/metadata found some partitions to be leaderless")
return retry(err) // note: err can be nil
} else {
return err
}
case PacketEncodingError:
// didn't even send, return the error
return err
default:
// some other error, remove that broker and try again
Logger.Println("client/metadata got error from broker while fetching metadata:", err)
_ = broker.Close()
client.deregisterBroker(broker)
}
}
Logger.Println("client/metadata no available broker to send metadata request to")
client.resurrectDeadBrokers()
return retry(ErrOutOfBrokers)
}
// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) {
client.lock.Lock()
defer client.lock.Unlock()
// For all the brokers we received:
// - if it is a new ID, save it
// - if it is an existing ID, but the address we have is stale, discard the old one and save it
// - otherwise ignore it, replacing our existing one would just bounce the connection
for _, broker := range data.Brokers {
client.registerBroker(broker)
}
for _, topic := range data.Topics {
delete(client.metadata, topic.Name)
delete(client.cachedPartitionsResults, topic.Name)
switch topic.Err {
case ErrNoError:
break
case ErrInvalidTopic: // don't retry, don't store partial results
err = topic.Err
continue
case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
err = topic.Err
retry = true
continue
case ErrLeaderNotAvailable: // retry, but store partial partition results
retry = true
break
default: // don't retry, don't store partial results
Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
err = topic.Err
continue
}
client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
for _, partition := range topic.Partitions {
client.metadata[topic.Name][partition.ID] = partition
if partition.Err == ErrLeaderNotAvailable {
retry = true
}
}
var partitionCache [maxPartitionIndex][]int32
partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
client.cachedPartitionsResults[topic.Name] = partitionCache
}
return
}
func (client *client) cachedCoordinator(consumerGroup string) *Broker {
client.lock.RLock()
defer client.lock.RUnlock()
if coordinatorID, ok := client.coordinators[consumerGroup]; !ok {
return nil
} else {
return client.brokers[coordinatorID]
}
}
func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) {
retry := func(err error) (*ConsumerMetadataResponse, error) {
if attemptsRemaining > 0 {
Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
time.Sleep(client.conf.Metadata.Retry.Backoff)
return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
}
return nil, err
}
for broker := client.any(); broker != nil; broker = client.any() {
Logger.Printf("client/coordinator requesting coordinator for consumergoup %s from %s\n", consumerGroup, broker.Addr())
request := new(ConsumerMetadataRequest)
request.ConsumerGroup = consumerGroup
response, err := broker.GetConsumerMetadata(request)
if err != nil {
Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
switch err.(type) {
case PacketEncodingError:
return nil, err
default:
_ = broker.Close()
client.deregisterBroker(broker)
continue
}
}
switch response.Err {
case ErrNoError:
Logger.Printf("client/coordinator coordinator for consumergoup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
return response, nil
case ErrConsumerCoordinatorNotAvailable:
Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
// This is very ugly, but this scenario will only happen once per cluster.
// The __consumer_offsets topic only has to be created one time.
// The number of partitions is not configurable, but partition 0 should always exist.
if _, err := client.Leader("__consumer_offsets", 0); err != nil {
Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
time.Sleep(2 * time.Second)
}
return retry(ErrConsumerCoordinatorNotAvailable)
default:
return nil, response.Err
}
}
Logger.Println("client/coordinator no available broker to send consumer metadata request to")
client.resurrectDeadBrokers()
return retry(ErrOutOfBrokers)
}
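// A minimal sketch of the public entry point for the logic above; callers are
// assumed to use Client.Coordinator rather than getConsumerMetadata directly,
// and "my_group" / "localhost:9092" are placeholders.
func exampleCoordinatorLookup() {
	client, err := NewClient([]string{"localhost:9092"}, nil)
	if err != nil {
		Logger.Println("creating client:", err)
		return
	}
	defer func() { _ = client.Close() }()
	coordinator, err := client.Coordinator("my_group")
	if err != nil {
		Logger.Println("looking up coordinator:", err)
		return
	}
	Logger.Printf("coordinator for my_group is #%d (%s)\n", coordinator.ID(), coordinator.Addr())
}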


@@ -1,608 +0,0 @@
package sarama
import (
"io"
"sync"
"testing"
"time"
)
func safeClose(t testing.TB, c io.Closer) {
err := c.Close()
if err != nil {
t.Error(err)
}
}
func TestSimpleClient(t *testing.T) {
seedBroker := newMockBroker(t, 1)
seedBroker.Returns(new(MetadataResponse))
client, err := NewClient([]string{seedBroker.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
seedBroker.Close()
safeClose(t, client)
}
func TestCachedPartitions(t *testing.T) {
seedBroker := newMockBroker(t, 1)
replicas := []int32{3, 1, 5}
isr := []int32{5, 1}
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker("localhost:12345", 2)
metadataResponse.AddTopicPartition("my_topic", 0, 2, replicas, isr, ErrNoError)
metadataResponse.AddTopicPartition("my_topic", 1, 2, replicas, isr, ErrLeaderNotAvailable)
seedBroker.Returns(metadataResponse)
config := NewConfig()
config.Metadata.Retry.Max = 0
c, err := NewClient([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
client := c.(*client)
// Verify they aren't cached the same
allP := client.cachedPartitionsResults["my_topic"][allPartitions]
writeP := client.cachedPartitionsResults["my_topic"][writablePartitions]
if len(allP) == len(writeP) {
t.Fatal("Invalid lengths!")
}
tmp := client.cachedPartitionsResults["my_topic"]
// Verify we actually use the cache at all!
tmp[allPartitions] = []int32{1, 2, 3, 4}
client.cachedPartitionsResults["my_topic"] = tmp
if 4 != len(client.cachedPartitions("my_topic", allPartitions)) {
t.Fatal("Not using the cache!")
}
seedBroker.Close()
safeClose(t, client)
}
func TestClientDoesntCachePartitionsForTopicsWithErrors(t *testing.T) {
seedBroker := newMockBroker(t, 1)
replicas := []int32{seedBroker.BrokerID()}
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(seedBroker.Addr(), seedBroker.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 1, replicas[0], replicas, replicas, ErrNoError)
metadataResponse.AddTopicPartition("my_topic", 2, replicas[0], replicas, replicas, ErrNoError)
seedBroker.Returns(metadataResponse)
config := NewConfig()
config.Metadata.Retry.Max = 0
client, err := NewClient([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
metadataResponse = new(MetadataResponse)
metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition)
seedBroker.Returns(metadataResponse)
partitions, err := client.Partitions("unknown")
if err != ErrUnknownTopicOrPartition {
t.Error("Expected ErrUnknownTopicOrPartition, found", err)
}
if partitions != nil {
t.Errorf("Should return nil as partition list, found %v", partitions)
}
// Should still use the cache of a known topic
partitions, err = client.Partitions("my_topic")
if err != nil {
t.Errorf("Expected no error, found %v", err)
}
metadataResponse = new(MetadataResponse)
metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition)
seedBroker.Returns(metadataResponse)
// Should not use cache for unknown topic
partitions, err = client.Partitions("unknown")
if err != ErrUnknownTopicOrPartition {
t.Error("Expected ErrUnknownTopicOrPartition, found", err)
}
if partitions != nil {
t.Errorf("Should return nil as partition list, found %v", partitions)
}
seedBroker.Close()
safeClose(t, client)
}
func TestClientSeedBrokers(t *testing.T) {
seedBroker := newMockBroker(t, 1)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker("localhost:12345", 2)
seedBroker.Returns(metadataResponse)
client, err := NewClient([]string{seedBroker.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
seedBroker.Close()
safeClose(t, client)
}
func TestClientMetadata(t *testing.T) {
seedBroker := newMockBroker(t, 1)
leader := newMockBroker(t, 5)
replicas := []int32{3, 1, 5}
isr := []int32{5, 1}
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), replicas, isr, ErrNoError)
metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), replicas, isr, ErrLeaderNotAvailable)
seedBroker.Returns(metadataResponse)
config := NewConfig()
config.Metadata.Retry.Max = 0
client, err := NewClient([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
topics, err := client.Topics()
if err != nil {
t.Error(err)
} else if len(topics) != 1 || topics[0] != "my_topic" {
t.Error("Client returned incorrect topics:", topics)
}
parts, err := client.Partitions("my_topic")
if err != nil {
t.Error(err)
} else if len(parts) != 2 || parts[0] != 0 || parts[1] != 1 {
t.Error("Client returned incorrect partitions for my_topic:", parts)
}
parts, err = client.WritablePartitions("my_topic")
if err != nil {
t.Error(err)
} else if len(parts) != 1 || parts[0] != 0 {
t.Error("Client returned incorrect writable partitions for my_topic:", parts)
}
tst, err := client.Leader("my_topic", 0)
if err != nil {
t.Error(err)
} else if tst.ID() != 5 {
t.Error("Leader for my_topic had incorrect ID.")
}
replicas, err = client.Replicas("my_topic", 0)
if err != nil {
t.Error(err)
} else if replicas[0] != 1 {
t.Error("Incorrect (or unsorted) replica")
} else if replicas[1] != 3 {
t.Error("Incorrect (or unsorted) replica")
} else if replicas[2] != 5 {
t.Error("Incorrect (or unsorted) replica")
}
leader.Close()
seedBroker.Close()
safeClose(t, client)
}
func TestClientGetOffset(t *testing.T) {
seedBroker := newMockBroker(t, 1)
leader := newMockBroker(t, 2)
leaderAddr := leader.Addr()
metadata := new(MetadataResponse)
metadata.AddTopicPartition("foo", 0, leader.BrokerID(), nil, nil, ErrNoError)
metadata.AddBroker(leaderAddr, leader.BrokerID())
seedBroker.Returns(metadata)
client, err := NewClient([]string{seedBroker.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
offsetResponse := new(OffsetResponse)
offsetResponse.AddTopicPartition("foo", 0, 123)
leader.Returns(offsetResponse)
offset, err := client.GetOffset("foo", 0, OffsetNewest)
if err != nil {
t.Error(err)
}
if offset != 123 {
t.Error("Unexpected offset, got ", offset)
}
leader.Close()
seedBroker.Returns(metadata)
leader = newMockBrokerAddr(t, 2, leaderAddr)
offsetResponse = new(OffsetResponse)
offsetResponse.AddTopicPartition("foo", 0, 456)
leader.Returns(offsetResponse)
offset, err = client.GetOffset("foo", 0, OffsetNewest)
if err != nil {
t.Error(err)
}
if offset != 456 {
t.Error("Unexpected offset, got ", offset)
}
seedBroker.Close()
leader.Close()
safeClose(t, client)
}
func TestClientReceivingUnknownTopic(t *testing.T) {
seedBroker := newMockBroker(t, 1)
metadataResponse1 := new(MetadataResponse)
seedBroker.Returns(metadataResponse1)
config := NewConfig()
config.Metadata.Retry.Max = 1
config.Metadata.Retry.Backoff = 0
client, err := NewClient([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
metadataUnknownTopic := new(MetadataResponse)
metadataUnknownTopic.AddTopic("new_topic", ErrUnknownTopicOrPartition)
seedBroker.Returns(metadataUnknownTopic)
seedBroker.Returns(metadataUnknownTopic)
if err := client.RefreshMetadata("new_topic"); err != ErrUnknownTopicOrPartition {
t.Error("ErrUnknownTopicOrPartition expected, got", err)
}
// If we are asking for the leader of a partition of the non-existing topic,
// we will request metadata again.
seedBroker.Returns(metadataUnknownTopic)
seedBroker.Returns(metadataUnknownTopic)
if _, err = client.Leader("new_topic", 1); err != ErrUnknownTopicOrPartition {
t.Error("Expected ErrUnknownTopicOrPartition, got", err)
}
safeClose(t, client)
seedBroker.Close()
}
func TestClientReceivingPartialMetadata(t *testing.T) {
seedBroker := newMockBroker(t, 1)
leader := newMockBroker(t, 5)
metadataResponse1 := new(MetadataResponse)
metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID())
seedBroker.Returns(metadataResponse1)
config := NewConfig()
config.Metadata.Retry.Max = 0
client, err := NewClient([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
replicas := []int32{leader.BrokerID(), seedBroker.BrokerID()}
metadataPartial := new(MetadataResponse)
metadataPartial.AddTopic("new_topic", ErrLeaderNotAvailable)
metadataPartial.AddTopicPartition("new_topic", 0, leader.BrokerID(), replicas, replicas, ErrNoError)
metadataPartial.AddTopicPartition("new_topic", 1, -1, replicas, []int32{}, ErrLeaderNotAvailable)
seedBroker.Returns(metadataPartial)
if err := client.RefreshMetadata("new_topic"); err != nil {
t.Error("ErrLeaderNotAvailable should not make RefreshMetadata respond with an error")
}
// Even though the metadata was incomplete, we should be able to get the leader of a partition
// for which we did get a useful response, without doing additional requests.
partition0Leader, err := client.Leader("new_topic", 0)
if err != nil {
t.Error(err)
} else if partition0Leader.Addr() != leader.Addr() {
t.Error("Unexpected leader returned", partition0Leader.Addr())
}
// If we are asking for the leader of a partition that didn't have a leader before,
// we will do another metadata request.
seedBroker.Returns(metadataPartial)
// Still no leader for the partition, so asking for it should return an error.
_, err = client.Leader("new_topic", 1)
if err != ErrLeaderNotAvailable {
t.Error("Expected ErrLeaderNotAvailable, got", err)
}
safeClose(t, client)
seedBroker.Close()
leader.Close()
}
func TestClientRefreshBehaviour(t *testing.T) {
seedBroker := newMockBroker(t, 1)
leader := newMockBroker(t, 5)
metadataResponse1 := new(MetadataResponse)
metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID())
seedBroker.Returns(metadataResponse1)
metadataResponse2 := new(MetadataResponse)
metadataResponse2.AddTopicPartition("my_topic", 0xb, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse2)
client, err := NewClient([]string{seedBroker.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
parts, err := client.Partitions("my_topic")
if err != nil {
t.Error(err)
} else if len(parts) != 1 || parts[0] != 0xb {
t.Error("Client returned incorrect partitions for my_topic:", parts)
}
tst, err := client.Leader("my_topic", 0xb)
if err != nil {
t.Error(err)
} else if tst.ID() != 5 {
t.Error("Leader for my_topic had incorrect ID.")
}
leader.Close()
seedBroker.Close()
safeClose(t, client)
}
func TestClientResurrectDeadSeeds(t *testing.T) {
initialSeed := newMockBroker(t, 0)
emptyMetadata := new(MetadataResponse)
initialSeed.Returns(emptyMetadata)
conf := NewConfig()
conf.Metadata.Retry.Backoff = 0
conf.Metadata.RefreshFrequency = 0
c, err := NewClient([]string{initialSeed.Addr()}, conf)
if err != nil {
t.Fatal(err)
}
initialSeed.Close()
client := c.(*client)
seed1 := newMockBroker(t, 1)
seed2 := newMockBroker(t, 2)
seed3 := newMockBroker(t, 3)
addr1 := seed1.Addr()
addr2 := seed2.Addr()
addr3 := seed3.Addr()
// Overwrite the seed brokers with a fixed ordering to make this test deterministic.
safeClose(t, client.seedBrokers[0])
client.seedBrokers = []*Broker{NewBroker(addr1), NewBroker(addr2), NewBroker(addr3)}
client.deadSeeds = []*Broker{}
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
if err := client.RefreshMetadata(); err != nil {
t.Error(err)
}
wg.Done()
}()
seed1.Close()
seed2.Close()
seed1 = newMockBrokerAddr(t, 1, addr1)
seed2 = newMockBrokerAddr(t, 2, addr2)
seed3.Close()
seed1.Close()
seed2.Returns(emptyMetadata)
wg.Wait()
if len(client.seedBrokers) != 2 {
t.Error("incorrect number of live seeds")
}
if len(client.deadSeeds) != 1 {
t.Error("incorrect number of dead seeds")
}
safeClose(t, c)
}
func TestClientCoordinatorWithConsumerOffsetsTopic(t *testing.T) {
seedBroker := newMockBroker(t, 1)
staleCoordinator := newMockBroker(t, 2)
freshCoordinator := newMockBroker(t, 3)
replicas := []int32{staleCoordinator.BrokerID(), freshCoordinator.BrokerID()}
metadataResponse1 := new(MetadataResponse)
metadataResponse1.AddBroker(staleCoordinator.Addr(), staleCoordinator.BrokerID())
metadataResponse1.AddBroker(freshCoordinator.Addr(), freshCoordinator.BrokerID())
metadataResponse1.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, ErrNoError)
seedBroker.Returns(metadataResponse1)
client, err := NewClient([]string{seedBroker.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
coordinatorResponse1 := new(ConsumerMetadataResponse)
coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable
seedBroker.Returns(coordinatorResponse1)
coordinatorResponse2 := new(ConsumerMetadataResponse)
coordinatorResponse2.CoordinatorID = staleCoordinator.BrokerID()
coordinatorResponse2.CoordinatorHost = "127.0.0.1"
coordinatorResponse2.CoordinatorPort = staleCoordinator.Port()
seedBroker.Returns(coordinatorResponse2)
broker, err := client.Coordinator("my_group")
if err != nil {
t.Error(err)
}
if staleCoordinator.Addr() != broker.Addr() {
t.Errorf("Expected coordinator to have address %s, found %s", staleCoordinator.Addr(), broker.Addr())
}
if staleCoordinator.BrokerID() != broker.ID() {
t.Errorf("Expected coordinator to have ID %d, found %d", staleCoordinator.BrokerID(), broker.ID())
}
// Grab the cached value
broker2, err := client.Coordinator("my_group")
if err != nil {
t.Error(err)
}
if broker2.Addr() != broker.Addr() {
t.Errorf("Expected the coordinator to be the same, but found %s vs. %s", broker2.Addr(), broker.Addr())
}
coordinatorResponse3 := new(ConsumerMetadataResponse)
coordinatorResponse3.CoordinatorID = freshCoordinator.BrokerID()
coordinatorResponse3.CoordinatorHost = "127.0.0.1"
coordinatorResponse3.CoordinatorPort = freshCoordinator.Port()
seedBroker.Returns(coordinatorResponse3)
// Refresh the locally cached value because it's stale
if err := client.RefreshCoordinator("my_group"); err != nil {
t.Error(err)
}
// Grab the fresh value
broker3, err := client.Coordinator("my_group")
if err != nil {
t.Error(err)
}
if broker3.Addr() != freshCoordinator.Addr() {
t.Errorf("Expected the freshCoordinator to be returned, but found %s.", broker3.Addr())
}
freshCoordinator.Close()
staleCoordinator.Close()
seedBroker.Close()
safeClose(t, client)
}
func TestClientCoordinatorWithoutConsumerOffsetsTopic(t *testing.T) {
seedBroker := newMockBroker(t, 1)
coordinator := newMockBroker(t, 2)
metadataResponse1 := new(MetadataResponse)
seedBroker.Returns(metadataResponse1)
config := NewConfig()
config.Metadata.Retry.Max = 1
config.Metadata.Retry.Backoff = 0
client, err := NewClient([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
coordinatorResponse1 := new(ConsumerMetadataResponse)
coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable
seedBroker.Returns(coordinatorResponse1)
metadataResponse2 := new(MetadataResponse)
metadataResponse2.AddTopic("__consumer_offsets", ErrUnknownTopicOrPartition)
seedBroker.Returns(metadataResponse2)
replicas := []int32{coordinator.BrokerID()}
metadataResponse3 := new(MetadataResponse)
metadataResponse3.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, ErrNoError)
seedBroker.Returns(metadataResponse3)
coordinatorResponse2 := new(ConsumerMetadataResponse)
coordinatorResponse2.CoordinatorID = coordinator.BrokerID()
coordinatorResponse2.CoordinatorHost = "127.0.0.1"
coordinatorResponse2.CoordinatorPort = coordinator.Port()
seedBroker.Returns(coordinatorResponse2)
broker, err := client.Coordinator("my_group")
if err != nil {
t.Error(err)
}
if coordinator.Addr() != broker.Addr() {
t.Errorf("Expected coordinator to have address %s, found %s", coordinator.Addr(), broker.Addr())
}
if coordinator.BrokerID() != broker.ID() {
t.Errorf("Expected coordinator to have ID %d, found %d", coordinator.BrokerID(), broker.ID())
}
coordinator.Close()
seedBroker.Close()
safeClose(t, client)
}
func TestClientAutorefreshShutdownRace(t *testing.T) {
seedBroker := newMockBroker(t, 1)
metadataResponse := new(MetadataResponse)
seedBroker.Returns(metadataResponse)
conf := NewConfig()
conf.Metadata.RefreshFrequency = 100 * time.Millisecond
client, err := NewClient([]string{seedBroker.Addr()}, conf)
if err != nil {
t.Fatal(err)
}
// Wait for the background refresh to kick in
time.Sleep(110 * time.Millisecond)
done := make(chan none)
go func() {
// Close the client
if err := client.Close(); err != nil {
t.Fatal(err)
}
close(done)
}()
// Wait for the Close to kick in
time.Sleep(10 * time.Millisecond)
// Then return some metadata to the still-running background thread
leader := newMockBroker(t, 2)
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopicPartition("foo", 0, leader.BrokerID(), []int32{2}, []int32{2}, ErrNoError)
seedBroker.Returns(metadataResponse)
<-done
seedBroker.Close()
// give the update time to happen so we get a panic if it's still running (which it shouldn't)
time.Sleep(10 * time.Millisecond)
}


@@ -1,275 +0,0 @@
package sarama
import (
"crypto/tls"
"time"
)
// Config is used to pass multiple configuration options to Sarama's constructors.
type Config struct {
// Net is the namespace for network-level properties used by the Broker, and shared by the Client/Producer/Consumer.
Net struct {
MaxOpenRequests int // How many outstanding requests a connection is allowed to have before sending on it blocks (default 5).
// All three of the below configurations are similar to the `socket.timeout.ms` setting in JVM kafka.
DialTimeout time.Duration // How long to wait for the initial connection to succeed before timing out and returning an error (default 30s).
ReadTimeout time.Duration // How long to wait for a response before timing out and returning an error (default 30s).
WriteTimeout time.Duration // How long to wait for a transmit to succeed before timing out and returning an error (default 30s).
// NOTE: these config values have no compatibility guarantees; they may change when Kafka releases its
// official TLS support in version 0.9.
TLS struct {
Enable bool // Whether or not to use TLS when connecting to the broker (defaults to false).
Config *tls.Config // The TLS configuration to use for secure connections if enabled (defaults to nil).
}
// KeepAlive specifies the keep-alive period for an active network connection.
// If zero, keep-alives are disabled. (default is 0: disabled).
KeepAlive time.Duration
}
// Metadata is the namespace for metadata management properties used by the Client, and shared by the Producer/Consumer.
Metadata struct {
Retry struct {
Max int // The total number of times to retry a metadata request when the cluster is in the middle of a leader election (default 3).
Backoff time.Duration // How long to wait for leader election to occur before retrying (default 250ms). Similar to the JVM's `retry.backoff.ms`.
}
// How frequently to refresh the cluster metadata in the background. Defaults to 10 minutes.
// Set to 0 to disable. Similar to `topic.metadata.refresh.interval.ms` in the JVM version.
RefreshFrequency time.Duration
}
// Producer is the namespace for configuration related to producing messages, used by the Producer.
Producer struct {
// The maximum permitted size of a message (defaults to 1000000). Should be set equal to or smaller than the broker's `message.max.bytes`.
MaxMessageBytes int
// The level of acknowledgement reliability needed from the broker (defaults to WaitForLocal).
// Equivalent to the `request.required.acks` setting of the JVM producer.
RequiredAcks RequiredAcks
// The maximum duration the broker will wait for the receipt of the number of RequiredAcks (defaults to 10 seconds).
// This is only relevant when RequiredAcks is set to WaitForAll or a number > 1. Only supports millisecond resolution,
// nanoseconds will be truncated. Equivalent to the JVM producer's `request.timeout.ms` setting.
Timeout time.Duration
// The type of compression to use on messages (defaults to no compression). Similar to `compression.codec` setting of the JVM producer.
Compression CompressionCodec
// Generates partitioners for choosing the partition to send messages to (defaults to hashing the message key).
// Similar to the `partitioner.class` setting for the JVM producer.
Partitioner PartitionerConstructor
// Return specifies what channels will be populated. If they are set to true, you must read from
// the respective channels to prevent deadlock.
Return struct {
// If enabled, successfully delivered messages will be returned on the Successes channel (default disabled).
Successes bool
// If enabled, messages that failed to deliver will be returned on the Errors channel, including the error that caused the failure (default enabled).
Errors bool
}
// The following config options control how often messages are batched up and sent to the broker. By default,
// messages are sent as fast as possible, and all messages received while the current batch is in-flight are placed
// into the subsequent batch.
Flush struct {
Bytes int // The best-effort number of bytes needed to trigger a flush. Use the global sarama.MaxRequestSize to set a hard upper limit.
Messages int // The best-effort number of messages needed to trigger a flush. Use `MaxMessages` to set a hard upper limit.
Frequency time.Duration // The best-effort frequency of flushes. Equivalent to `queue.buffering.max.ms` setting of JVM producer.
// The maximum number of messages the producer will send in a single broker request.
// Defaults to 0 for unlimited. Similar to `queue.buffering.max.messages` in the JVM producer.
MaxMessages int
}
Retry struct {
// The total number of times to retry sending a message (default 3).
// Similar to the `message.send.max.retries` setting of the JVM producer.
Max int
// How long to wait for the cluster to settle between retries (default 100ms).
// Similar to the `retry.backoff.ms` setting of the JVM producer.
Backoff time.Duration
}
}
// Consumer is the namespace for configuration related to consuming messages, used by the Consumer.
Consumer struct {
Retry struct {
// How long to wait after a failing to read from a partition before trying again (default 2s).
Backoff time.Duration
}
// Fetch is the namespace for controlling how many bytes are retrieved by any given request.
Fetch struct {
// The minimum number of message bytes to fetch in a request - the broker will wait until at least this many are available.
// The default is 1, as 0 causes the consumer to spin when no messages are available. Equivalent to the JVM's `fetch.min.bytes`.
Min int32
// The default number of message bytes to fetch from the broker in each request (default 32768). This should be larger than the
// majority of your messages, or else the consumer will spend a lot of time negotiating sizes and not actually consuming. Similar
// to the JVM's `fetch.message.max.bytes`.
Default int32
// The maximum number of message bytes to fetch from the broker in a single request. Messages larger than this will return
// ErrMessageTooLarge and will not be consumable, so you must be sure this is at least as large as your largest message.
// Defaults to 0 (no limit). Similar to the JVM's `fetch.message.max.bytes`. The global `sarama.MaxResponseSize` still applies.
Max int32
}
// The maximum amount of time the broker will wait for Consumer.Fetch.Min bytes to become available before it
// returns fewer than that anyway. The default is 250ms, since 0 causes the consumer to spin when no events are available.
// 100-500ms is a reasonable range for most cases. Kafka only supports precision up to milliseconds; nanoseconds will be truncated.
// Equivalent to the JVM's `fetch.wait.max.ms`.
MaxWaitTime time.Duration
// The maximum amount of time the consumer expects a message to take to process for the user. If writing to the Messages channel
// takes longer than this, that partition will stop fetching more messages until it can proceed again. Note that, since the
// Messages channel is buffered, the actual grace time is (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
MaxProcessingTime time.Duration
// Return specifies what channels will be populated. If they are set to true, you must read from
// them to prevent deadlock.
Return struct {
// If enabled, any errors that occurred while consuming are returned on the Errors channel (default disabled).
Errors bool
}
}
// A user-provided string sent with every request to the brokers for logging, debugging, and auditing purposes.
// Defaults to "sarama", but you should probably set it to something specific to your application.
ClientID string
// The number of events to buffer in internal and external channels. This permits the producer and consumer to
// continue processing some messages in the background while user code is working, greatly improving throughput.
// Defaults to 256.
ChannelBufferSize int
}
// NewConfig returns a new configuration instance with sane defaults.
func NewConfig() *Config {
c := &Config{}
c.Net.MaxOpenRequests = 5
c.Net.DialTimeout = 30 * time.Second
c.Net.ReadTimeout = 30 * time.Second
c.Net.WriteTimeout = 30 * time.Second
c.Metadata.Retry.Max = 3
c.Metadata.Retry.Backoff = 250 * time.Millisecond
c.Metadata.RefreshFrequency = 10 * time.Minute
c.Producer.MaxMessageBytes = 1000000
c.Producer.RequiredAcks = WaitForLocal
c.Producer.Timeout = 10 * time.Second
c.Producer.Partitioner = NewHashPartitioner
c.Producer.Retry.Max = 3
c.Producer.Retry.Backoff = 100 * time.Millisecond
c.Producer.Return.Errors = true
c.Consumer.Fetch.Min = 1
c.Consumer.Fetch.Default = 32768
c.Consumer.Retry.Backoff = 2 * time.Second
c.Consumer.MaxWaitTime = 250 * time.Millisecond
c.Consumer.MaxProcessingTime = 100 * time.Millisecond
c.Consumer.Return.Errors = false
c.ChannelBufferSize = 256
return c
}
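// A short, hypothetical sketch of overriding a few defaults after NewConfig;
// the values are examples only, and WaitForAll is assumed to be the
// RequiredAcks constant defined alongside WaitForLocal elsewhere in this package.
func exampleConfigOverrides() {
	config := NewConfig()
	config.ClientID = "my-application"
	config.Net.DialTimeout = 10 * time.Second
	config.Producer.RequiredAcks = WaitForAll
	config.Consumer.Return.Errors = true
	if err := config.Validate(); err != nil {
		Logger.Println("invalid configuration:", err)
	}
}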
// Validate checks a Config instance. It will return a
// ConfigurationError if the specified values don't make sense.
func (c *Config) Validate() error {
// some configuration values should be warned on but not fail completely, do those first
if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil {
Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
}
if c.Producer.RequiredAcks > 1 {
Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
}
if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
Logger.Println("Producer.MaxMessageBytes is larger than MaxRequestSize; it will be ignored.")
}
if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
Logger.Println("Producer.Flush.Bytes is larger than MaxRequestSize; it will be ignored.")
}
if c.Producer.Timeout%time.Millisecond != 0 {
Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
}
if c.Consumer.MaxWaitTime < 100*time.Millisecond {
Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
}
if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
}
if c.ClientID == "sarama" {
Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
}
// validate Net values
switch {
case c.Net.MaxOpenRequests <= 0:
return ConfigurationError("Net.MaxOpenRequests must be > 0")
case c.Net.DialTimeout <= 0:
return ConfigurationError("Net.DialTimeout must be > 0")
case c.Net.ReadTimeout <= 0:
return ConfigurationError("Net.ReadTimeout must be > 0")
case c.Net.WriteTimeout <= 0:
return ConfigurationError("Net.WriteTimeout must be > 0")
case c.Net.KeepAlive < 0:
return ConfigurationError("Net.KeepAlive must be >= 0")
}
// validate the Metadata values
switch {
case c.Metadata.Retry.Max < 0:
return ConfigurationError("Metadata.Retry.Max must be >= 0")
case c.Metadata.Retry.Backoff < 0:
return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
case c.Metadata.RefreshFrequency < 0:
return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
}
// validate the Producer values
switch {
case c.Producer.MaxMessageBytes <= 0:
return ConfigurationError("Producer.MaxMessageBytes must be > 0")
case c.Producer.RequiredAcks < -1:
return ConfigurationError("Producer.RequiredAcks must be >= -1")
case c.Producer.Timeout <= 0:
return ConfigurationError("Producer.Timeout must be > 0")
case c.Producer.Partitioner == nil:
return ConfigurationError("Producer.Partitioner must not be nil")
case c.Producer.Flush.Bytes < 0:
return ConfigurationError("Producer.Flush.Bytes must be >= 0")
case c.Producer.Flush.Messages < 0:
return ConfigurationError("Producer.Flush.Messages must be >= 0")
case c.Producer.Flush.Frequency < 0:
return ConfigurationError("Producer.Flush.Frequency must be >= 0")
case c.Producer.Flush.MaxMessages < 0:
return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
case c.Producer.Retry.Max < 0:
return ConfigurationError("Producer.Retry.Max must be >= 0")
case c.Producer.Retry.Backoff < 0:
return ConfigurationError("Producer.Retry.Backoff must be >= 0")
}
// validate the Consumer values
switch {
case c.Consumer.Fetch.Min <= 0:
return ConfigurationError("Consumer.Fetch.Min must be > 0")
case c.Consumer.Fetch.Default <= 0:
return ConfigurationError("Consumer.Fetch.Default must be > 0")
case c.Consumer.Fetch.Max < 0:
return ConfigurationError("Consumer.Fetch.Max must be >= 0")
case c.Consumer.MaxWaitTime < 1*time.Millisecond:
return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
case c.Consumer.MaxProcessingTime <= 0:
return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
case c.Consumer.Retry.Backoff < 0:
return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
}
// validate misc shared values
switch {
case c.ChannelBufferSize < 0:
return ConfigurationError("ChannelBufferSize must be >= 0")
}
return nil
}
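// A tiny sketch of Validate rejecting an out-of-range value; the failing field
// is chosen arbitrarily for illustration.
func exampleConfigValidation() {
	config := NewConfig()
	config.Consumer.Fetch.Min = 0 // invalid: Validate requires a value > 0
	if err := config.Validate(); err != nil {
		// err is the ConfigurationError produced by the first failing check.
		Logger.Println(err)
	}
}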


@@ -1,10 +0,0 @@
package sarama
import "testing"
func TestDefaultConfigValidates(t *testing.T) {
config := NewConfig()
if err := config.Validate(); err != nil {
t.Error(err)
}
}


@@ -1,676 +0,0 @@
package sarama
import (
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
)
// ConsumerMessage encapsulates a Kafka message returned by the consumer.
type ConsumerMessage struct {
Key, Value []byte
Topic string
Partition int32
Offset int64
}
// ConsumerError is what is provided to the user when an error occurs.
// It wraps an error and includes the topic and partition.
type ConsumerError struct {
Topic string
Partition int32
Err error
}
func (ce ConsumerError) Error() string {
return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
}
// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
// when stopping.
type ConsumerErrors []*ConsumerError
func (ce ConsumerErrors) Error() string {
return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
}
// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
// on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of
// scope.
//
// Sarama's Consumer type does not currently support automatic consumer group rebalancing and offset tracking,
// however the https://github.com/wvanbergen/kafka library builds on Sarama to add this support. We plan
// to properly integrate this functionality at a later date.
type Consumer interface {
// Topics returns the set of available topics as retrieved from the cluster metadata.
// This method is the same as Client.Topics(), and is provided for convenience.
Topics() ([]string, error)
// Partitions returns the sorted list of all partition IDs for the given topic.
// This method is the same as Client.Partitions(), and is provided for convenience.
Partitions(topic string) ([]int32, error)
// ConsumePartition creates a PartitionConsumer on the given topic/partition with the given offset. It will
// return an error if this Consumer is already consuming on the given topic/partition. Offset can be a
// literal offset, or OffsetNewest or OffsetOldest
ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
// Close shuts down the consumer. It must be called after all child PartitionConsumers have already been closed.
Close() error
}
type consumer struct {
client Client
conf *Config
ownClient bool
lock sync.Mutex
children map[string]map[int32]*partitionConsumer
brokerConsumers map[*Broker]*brokerConsumer
}
// NewConsumer creates a new consumer using the given broker addresses and configuration.
func NewConsumer(addrs []string, config *Config) (Consumer, error) {
client, err := NewClient(addrs, config)
if err != nil {
return nil, err
}
c, err := NewConsumerFromClient(client)
if err != nil {
return nil, err
}
c.(*consumer).ownClient = true
return c, nil
}
// NewConsumerFromClient creates a new consumer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this consumer.
func NewConsumerFromClient(client Client) (Consumer, error) {
// Check that we are not dealing with a closed Client before processing any other arguments
if client.Closed() {
return nil, ErrClosedClient
}
c := &consumer{
client: client,
conf: client.Config(),
children: make(map[string]map[int32]*partitionConsumer),
brokerConsumers: make(map[*Broker]*brokerConsumer),
}
return c, nil
}
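// A hedged sketch contrasting the two constructors above; the broker address
// is a placeholder. With NewConsumer the consumer owns its client and closes
// it in Close; with NewConsumerFromClient the caller keeps that responsibility.
func exampleConsumerConstruction() {
	consumer, err := NewConsumer([]string{"localhost:9092"}, nil)
	if err != nil {
		Logger.Println("creating consumer:", err)
		return
	}
	_ = consumer.Close() // also closes the internally created client

	client, err := NewClient([]string{"localhost:9092"}, nil)
	if err != nil {
		Logger.Println("creating client:", err)
		return
	}
	consumer2, err := NewConsumerFromClient(client)
	if err != nil {
		Logger.Println("creating consumer from client:", err)
		_ = client.Close()
		return
	}
	_ = consumer2.Close() // does not close the shared client
	_ = client.Close()    // the caller must close it explicitly
}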
func (c *consumer) Close() error {
if c.ownClient {
return c.client.Close()
}
return nil
}
func (c *consumer) Topics() ([]string, error) {
return c.client.Topics()
}
func (c *consumer) Partitions(topic string) ([]int32, error) {
return c.client.Partitions(topic)
}
func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
child := &partitionConsumer{
consumer: c,
conf: c.conf,
topic: topic,
partition: partition,
messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
errors: make(chan *ConsumerError, c.conf.ChannelBufferSize),
feeder: make(chan *FetchResponse, 1),
trigger: make(chan none, 1),
dying: make(chan none),
fetchSize: c.conf.Consumer.Fetch.Default,
}
if err := child.chooseStartingOffset(offset); err != nil {
return nil, err
}
var leader *Broker
var err error
if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
return nil, err
}
if err := c.addChild(child); err != nil {
return nil, err
}
go withRecover(child.dispatcher)
go withRecover(child.responseFeeder)
child.broker = c.refBrokerConsumer(leader)
child.broker.input <- child
return child, nil
}
func (c *consumer) addChild(child *partitionConsumer) error {
c.lock.Lock()
defer c.lock.Unlock()
topicChildren := c.children[child.topic]
if topicChildren == nil {
topicChildren = make(map[int32]*partitionConsumer)
c.children[child.topic] = topicChildren
}
if topicChildren[child.partition] != nil {
return ConfigurationError("That topic/partition is already being consumed")
}
topicChildren[child.partition] = child
return nil
}
func (c *consumer) removeChild(child *partitionConsumer) {
c.lock.Lock()
defer c.lock.Unlock()
delete(c.children[child.topic], child.partition)
}
func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
c.lock.Lock()
defer c.lock.Unlock()
bc := c.brokerConsumers[broker]
if bc == nil {
bc = c.newBrokerConsumer(broker)
c.brokerConsumers[broker] = bc
}
bc.refs++
return bc
}
func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
c.lock.Lock()
defer c.lock.Unlock()
brokerWorker.refs--
if brokerWorker.refs == 0 {
close(brokerWorker.input)
if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
delete(c.brokerConsumers, brokerWorker.broker)
}
}
}
func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
c.lock.Lock()
defer c.lock.Unlock()
delete(c.brokerConsumers, brokerWorker.broker)
}
// PartitionConsumer
// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call Close()
// or AsyncClose() on a PartitionConsumer to avoid leaks, it will not be garbage-collected automatically
// when it passes out of scope.
//
// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
type PartitionConsumer interface {
// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately,
// after which you should wait until the 'messages' and 'errors' channel are drained.
// It is required to call this function (or Close) before a consumer object passes out of scope,
// as it will otherwise leak memory. You must call this before calling Close on the underlying
// client.
AsyncClose()
// Close stops the PartitionConsumer from fetching messages. It is required to call this function
// (or AsyncClose) before a consumer object passes out of scope, as it will otherwise leak memory. You must
// call this before calling Close on the underlying client.
Close() error
// Messages returns the read channel for the messages that are returned by the broker.
Messages() <-chan *ConsumerMessage
// Errors returns a read channel of errors that occurred during consuming, if enabled. By default,
// errors are logged and not returned over this channel. If you want to implement any custom error
// handling, set your config's Consumer.Return.Errors setting to true, and read from this channel.
Errors() <-chan *ConsumerError
// HighWaterMarkOffset returns the high water mark offset of the partition, i.e. the offset that will
// be used for the next message that will be produced. You can use this to determine how far behind
// the processing is.
HighWaterMarkOffset() int64
}
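// A minimal consumption loop in the style described in the comment above;
// the broker address, topic and fixed message count are placeholders, and a
// real application would normally loop until a shutdown signal arrives.
func examplePartitionConsumerLoop() {
	master, err := NewConsumer([]string{"localhost:9092"}, nil)
	if err != nil {
		Logger.Println("creating consumer:", err)
		return
	}
	defer func() { _ = master.Close() }()
	pc, err := master.ConsumePartition("my_topic", 0, OffsetNewest)
	if err != nil {
		Logger.Println("creating partition consumer:", err)
		return
	}
	defer func() { _ = pc.Close() }()
	for i := 0; i < 10; i++ {
		msg := <-pc.Messages()
		Logger.Printf("consumed %s/%d at offset %d\n", msg.Topic, msg.Partition, msg.Offset)
	}
}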
type partitionConsumer struct {
consumer *consumer
conf *Config
topic string
partition int32
broker *brokerConsumer
messages chan *ConsumerMessage
errors chan *ConsumerError
feeder chan *FetchResponse
trigger, dying chan none
responseResult error
fetchSize int32
offset int64
highWaterMarkOffset int64
}
var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
func (child *partitionConsumer) sendError(err error) {
cErr := &ConsumerError{
Topic: child.topic,
Partition: child.partition,
Err: err,
}
if child.conf.Consumer.Return.Errors {
child.errors <- cErr
} else {
Logger.Println(cErr)
}
}
func (child *partitionConsumer) dispatcher() {
for _ = range child.trigger {
select {
case <-child.dying:
close(child.trigger)
case <-time.After(child.conf.Consumer.Retry.Backoff):
if child.broker != nil {
child.consumer.unrefBrokerConsumer(child.broker)
child.broker = nil
}
Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
if err := child.dispatch(); err != nil {
child.sendError(err)
child.trigger <- none{}
}
}
}
if child.broker != nil {
child.consumer.unrefBrokerConsumer(child.broker)
}
child.consumer.removeChild(child)
close(child.feeder)
}
func (child *partitionConsumer) dispatch() error {
if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
return err
}
var leader *Broker
var err error
if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
return err
}
child.broker = child.consumer.refBrokerConsumer(leader)
child.broker.input <- child
return nil
}
func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
if err != nil {
return err
}
oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
if err != nil {
return err
}
switch {
case offset == OffsetNewest:
child.offset = newestOffset
case offset == OffsetOldest:
child.offset = oldestOffset
case offset >= oldestOffset && offset <= newestOffset:
child.offset = offset
default:
return ErrOffsetOutOfRange
}
return nil
}
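// A hedged sketch of the three accepted starting positions handled above;
// the topic, partitions and literal offset are placeholders, and a Consumer
// value is assumed to exist already.
func exampleStartingOffsets(c Consumer) {
	// Start at whatever will be produced next.
	pcNewest, _ := c.ConsumePartition("my_topic", 0, OffsetNewest)
	// Start at the oldest offset still retained by the broker.
	pcOldest, _ := c.ConsumePartition("my_topic", 1, OffsetOldest)
	// Start at a literal offset; anything outside [oldest, newest] fails.
	pcLiteral, err := c.ConsumePartition("my_topic", 2, 12345)
	if err == ErrOffsetOutOfRange {
		Logger.Println("offset 12345 is not (or no longer) available")
	}
	for _, pc := range []PartitionConsumer{pcNewest, pcOldest, pcLiteral} {
		if pc != nil {
			_ = pc.Close()
		}
	}
}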
func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
return child.messages
}
func (child *partitionConsumer) Errors() <-chan *ConsumerError {
return child.errors
}
func (child *partitionConsumer) AsyncClose() {
// this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
// the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
// 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will
// also just close itself)
close(child.dying)
}
func (child *partitionConsumer) Close() error {
child.AsyncClose()
go withRecover(func() {
for _ = range child.messages {
// drain
}
})
var errors ConsumerErrors
for err := range child.errors {
errors = append(errors, err)
}
if len(errors) > 0 {
return errors
}
return nil
}
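// A hedged sketch of the asynchronous shutdown path for callers that prefer to
// drain the channels themselves instead of calling Close; pc is assumed to be
// an open PartitionConsumer.
func exampleAsyncClose(pc PartitionConsumer) {
	pc.AsyncClose()
	// Both channels are closed once the remaining messages and errors have
	// been delivered, so ranging over them drains everything left.
	for msg := range pc.Messages() {
		Logger.Println("draining message at offset", msg.Offset)
	}
	for consErr := range pc.Errors() {
		Logger.Println("draining error:", consErr)
	}
}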
func (child *partitionConsumer) HighWaterMarkOffset() int64 {
return atomic.LoadInt64(&child.highWaterMarkOffset)
}
func (child *partitionConsumer) responseFeeder() {
var msgs []*ConsumerMessage
feederLoop:
for response := range child.feeder {
msgs, child.responseResult = child.parseResponse(response)
for i, msg := range msgs {
select {
case child.messages <- msg:
case <-time.After(child.conf.Consumer.MaxProcessingTime):
child.responseResult = errTimedOut
child.broker.acks.Done()
for _, msg = range msgs[i:] {
child.messages <- msg
}
child.broker.input <- child
continue feederLoop
}
}
child.broker.acks.Done()
}
close(child.messages)
close(child.errors)
}
func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
block := response.GetBlock(child.topic, child.partition)
if block == nil {
return nil, ErrIncompleteResponse
}
if block.Err != ErrNoError {
return nil, block.Err
}
if len(block.MsgSet.Messages) == 0 {
// We got no messages. If we got a trailing one then we need to ask for more data.
// Otherwise we just poll again and wait for one to be produced...
if block.MsgSet.PartialTrailingMessage {
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
// we can't ask for more data, we've hit the configured limit
child.sendError(ErrMessageTooLarge)
child.offset++ // skip this one so we can keep processing future messages
} else {
child.fetchSize *= 2
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
child.fetchSize = child.conf.Consumer.Fetch.Max
}
}
}
return nil, nil
}
// we got messages, reset our fetch size in case it was increased for a previous request
child.fetchSize = child.conf.Consumer.Fetch.Default
atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
incomplete := false
prelude := true
var messages []*ConsumerMessage
for _, msgBlock := range block.MsgSet.Messages {
for _, msg := range msgBlock.Messages() {
if prelude && msg.Offset < child.offset {
continue
}
prelude = false
if msg.Offset >= child.offset {
messages = append(messages, &ConsumerMessage{
Topic: child.topic,
Partition: child.partition,
Key: msg.Msg.Key,
Value: msg.Msg.Value,
Offset: msg.Offset,
})
child.offset = msg.Offset + 1
} else {
incomplete = true
}
}
}
if incomplete || len(messages) == 0 {
return nil, ErrIncompleteResponse
}
return messages, nil
}
// brokerConsumer
type brokerConsumer struct {
consumer *consumer
broker *Broker
input chan *partitionConsumer
newSubscriptions chan []*partitionConsumer
wait chan none
subscriptions map[*partitionConsumer]none
acks sync.WaitGroup
refs int
}
func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
bc := &brokerConsumer{
consumer: c,
broker: broker,
input: make(chan *partitionConsumer),
newSubscriptions: make(chan []*partitionConsumer),
wait: make(chan none),
subscriptions: make(map[*partitionConsumer]none),
refs: 0,
}
go withRecover(bc.subscriptionManager)
go withRecover(bc.subscriptionConsumer)
return bc
}
func (bc *brokerConsumer) subscriptionManager() {
var buffer []*partitionConsumer
// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
// so the main goroutine can block waiting for work if it has none.
for {
if len(buffer) > 0 {
select {
case event, ok := <-bc.input:
if !ok {
goto done
}
buffer = append(buffer, event)
case bc.newSubscriptions <- buffer:
buffer = nil
case bc.wait <- none{}:
}
} else {
select {
case event, ok := <-bc.input:
if !ok {
goto done
}
buffer = append(buffer, event)
case bc.newSubscriptions <- nil:
}
}
}
done:
close(bc.wait)
if len(buffer) > 0 {
bc.newSubscriptions <- buffer
}
close(bc.newSubscriptions)
}
func (bc *brokerConsumer) subscriptionConsumer() {
<-bc.wait // wait for our first piece of work
// the subscriptionManager ensures we will get nil right away if no new subscriptions are available
for newSubscriptions := range bc.newSubscriptions {
for _, child := range newSubscriptions {
bc.subscriptions[child] = none{}
Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
}
if len(bc.subscriptions) == 0 {
// We're about to be shut down or we're about to receive more subscriptions.
// Either way, the signal just hasn't propagated to our goroutine yet.
<-bc.wait
continue
}
response, err := bc.fetchNewMessages()
if err != nil {
Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
bc.abort(err)
return
}
bc.acks.Add(len(bc.subscriptions))
for child := range bc.subscriptions {
child.feeder <- response
}
bc.acks.Wait()
bc.handleResponses()
}
}
func (bc *brokerConsumer) handleResponses() {
// handles the response codes left for us by our subscriptions, and abandons ones that have been closed
for child := range bc.subscriptions {
select {
case <-child.dying:
Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
close(child.trigger)
delete(bc.subscriptions, child)
default:
result := child.responseResult
child.responseResult = nil
switch result {
case nil:
break
case errTimedOut:
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
bc.broker.ID(), child.topic, child.partition)
delete(bc.subscriptions, child)
case ErrOffsetOutOfRange:
// there's no point in retrying this; it will just fail the same way again
// shut it down and force the user to choose what to do
child.sendError(result)
Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
close(child.trigger)
delete(bc.subscriptions, child)
case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable:
// not an error, but does need redispatching
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
bc.broker.ID(), child.topic, child.partition, result)
child.trigger <- none{}
delete(bc.subscriptions, child)
default:
// dunno, tell the user and try redispatching
child.sendError(result)
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
bc.broker.ID(), child.topic, child.partition, result)
child.trigger <- none{}
delete(bc.subscriptions, child)
}
}
}
}
func (bc *brokerConsumer) abort(err error) {
bc.consumer.abandonBrokerConsumer(bc)
_ = bc.broker.Close() // we don't care about the error this might return, we already have one
for child := range bc.subscriptions {
child.sendError(err)
child.trigger <- none{}
}
for newSubscription := range bc.newSubscriptions {
for _, child := range newSubscription {
child.sendError(err)
child.trigger <- none{}
}
}
}
func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
request := &FetchRequest{
MinBytes: bc.consumer.conf.Consumer.Fetch.Min,
MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
}
for child := range bc.subscriptions {
request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
}
return bc.broker.Fetch(request)
}


@@ -1,22 +0,0 @@
package sarama
type ConsumerMetadataRequest struct {
ConsumerGroup string
}
func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
return pe.putString(r.ConsumerGroup)
}
func (r *ConsumerMetadataRequest) decode(pd packetDecoder) (err error) {
r.ConsumerGroup, err = pd.getString()
return err
}
func (r *ConsumerMetadataRequest) key() int16 {
return 10
}
func (r *ConsumerMetadataRequest) version() int16 {
return 0
}


@@ -1,19 +0,0 @@
package sarama
import "testing"
var (
consumerMetadataRequestEmpty = []byte{
0x00, 0x00}
consumerMetadataRequestString = []byte{
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r'}
)
func TestConsumerMetadataRequest(t *testing.T) {
request := new(ConsumerMetadataRequest)
testRequest(t, "empty string", request, consumerMetadataRequestEmpty)
request.ConsumerGroup = "foobar"
testRequest(t, "with string", request, consumerMetadataRequestString)
}


@@ -1,73 +0,0 @@
package sarama
import (
"net"
"strconv"
)
type ConsumerMetadataResponse struct {
Err KError
Coordinator *Broker
CoordinatorID int32 // deprecated: use Coordinator.ID()
CoordinatorHost string // deprecated: use Coordinator.Addr()
CoordinatorPort int32 // deprecated: use Coordinator.Addr()
}
func (r *ConsumerMetadataResponse) decode(pd packetDecoder) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(tmp)
coordinator := new(Broker)
if err := coordinator.decode(pd); err != nil {
return err
}
if coordinator.addr == ":0" {
return nil
}
r.Coordinator = coordinator
// this can all go away in 2.0, but we have to fill in deprecated fields to maintain
// backwards compatibility
host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
if err != nil {
return err
}
port, err := strconv.ParseInt(portstr, 10, 32)
if err != nil {
return err
}
r.CoordinatorID = r.Coordinator.ID()
r.CoordinatorHost = host
r.CoordinatorPort = int32(port)
return nil
}
func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
if r.Coordinator != nil {
host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
if err != nil {
return err
}
port, err := strconv.ParseInt(portstr, 10, 32)
if err != nil {
return err
}
pe.putInt32(r.Coordinator.ID())
if err := pe.putString(host); err != nil {
return err
}
pe.putInt32(int32(port))
return nil
}
pe.putInt32(r.CoordinatorID)
if err := pe.putString(r.CoordinatorHost); err != nil {
return err
}
pe.putInt32(r.CoordinatorPort)
return nil
}
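// A small sketch of reading the coordinator address from a decoded response
// while tolerating values that only have the deprecated fields set; the
// function name is illustrative.
func exampleCoordinatorAddr(r *ConsumerMetadataResponse) string {
	if r.Coordinator != nil {
		return r.Coordinator.Addr()
	}
	// Fall back to the deprecated host/port pair.
	return net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort)))
}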


@@ -1,35 +0,0 @@
package sarama
import "testing"
var (
consumerMetadataResponseError = []byte{
0x00, 0x0E,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
0x00, 0x00, 0x00, 0x00}
consumerMetadataResponseSuccess = []byte{
0x00, 0x00,
0x00, 0x00, 0x00, 0xAB,
0x00, 0x03, 'f', 'o', 'o',
0x00, 0x00, 0xCC, 0xDD}
)
func TestConsumerMetadataResponseError(t *testing.T) {
response := ConsumerMetadataResponse{Err: ErrOffsetsLoadInProgress}
testResponse(t, "error", &response, consumerMetadataResponseError)
}
func TestConsumerMetadataResponseSuccess(t *testing.T) {
broker := NewBroker("foo:52445")
broker.id = 0xAB
response := ConsumerMetadataResponse{
Coordinator: broker,
CoordinatorID: 0xAB,
CoordinatorHost: "foo",
CoordinatorPort: 0xCCDD,
Err: ErrNoError,
}
testResponse(t, "success", &response, consumerMetadataResponseSuccess)
}


@@ -1,844 +0,0 @@
package sarama
import (
"sync"
"testing"
"time"
)
var testMsg = StringEncoder("Foo")
// If a particular offset is provided then messages are consumed starting from
// that offset.
func TestConsumerOffsetManual(t *testing.T) {
// Given
broker0 := newMockBroker(t, 0)
mockFetchResponse := newMockFetchResponse(t, 1)
for i := 0; i < 10; i++ {
mockFetchResponse.SetMessage("my_topic", 0, int64(i+1234), testMsg)
}
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": newMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": newMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetOldest, 0).
SetOffset("my_topic", 0, OffsetNewest, 2345),
"FetchRequest": mockFetchResponse,
})
// When
master, err := NewConsumer([]string{broker0.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
consumer, err := master.ConsumePartition("my_topic", 0, 1234)
if err != nil {
t.Fatal(err)
}
// Then: messages starting from offset 1234 are consumed.
for i := 0; i < 10; i++ {
select {
case message := <-consumer.Messages():
assertMessageOffset(t, message, int64(i+1234))
case err := <-consumer.Errors():
t.Error(err)
}
}
safeClose(t, consumer)
safeClose(t, master)
broker0.Close()
}
// If `OffsetNewest` is passed as the initial offset then the first consumed
// message indeed corresponds to the offset that the broker claims to be the
// newest in its metadata response.
func TestConsumerOffsetNewest(t *testing.T) {
// Given
broker0 := newMockBroker(t, 0)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": newMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": newMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetNewest, 10).
SetOffset("my_topic", 0, OffsetOldest, 7),
"FetchRequest": newMockFetchResponse(t, 1).
SetMessage("my_topic", 0, 9, testMsg).
SetMessage("my_topic", 0, 10, testMsg).
SetMessage("my_topic", 0, 11, testMsg).
SetHighWaterMark("my_topic", 0, 14),
})
master, err := NewConsumer([]string{broker0.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
// When
consumer, err := master.ConsumePartition("my_topic", 0, OffsetNewest)
if err != nil {
t.Fatal(err)
}
// Then
assertMessageOffset(t, <-consumer.Messages(), 10)
if hwmo := consumer.HighWaterMarkOffset(); hwmo != 14 {
t.Errorf("Expected high water mark offset 14, found %d", hwmo)
}
safeClose(t, consumer)
safeClose(t, master)
broker0.Close()
}
// It is possible to close a partition consumer and then create the same one anew.
func TestConsumerRecreate(t *testing.T) {
// Given
broker0 := newMockBroker(t, 0)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": newMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": newMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetOldest, 0).
SetOffset("my_topic", 0, OffsetNewest, 1000),
"FetchRequest": newMockFetchResponse(t, 1).
SetMessage("my_topic", 0, 10, testMsg),
})
c, err := NewConsumer([]string{broker0.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
pc, err := c.ConsumePartition("my_topic", 0, 10)
if err != nil {
t.Fatal(err)
}
assertMessageOffset(t, <-pc.Messages(), 10)
// When
safeClose(t, pc)
pc, err = c.ConsumePartition("my_topic", 0, 10)
if err != nil {
t.Fatal(err)
}
// Then
assertMessageOffset(t, <-pc.Messages(), 10)
safeClose(t, pc)
safeClose(t, c)
broker0.Close()
}
// An attempt to consume the same partition twice should fail.
func TestConsumerDuplicate(t *testing.T) {
// Given
broker0 := newMockBroker(t, 0)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": newMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": newMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetOldest, 0).
SetOffset("my_topic", 0, OffsetNewest, 1000),
"FetchRequest": newMockFetchResponse(t, 1),
})
config := NewConfig()
config.ChannelBufferSize = 0
c, err := NewConsumer([]string{broker0.Addr()}, config)
if err != nil {
t.Fatal(err)
}
pc1, err := c.ConsumePartition("my_topic", 0, 0)
if err != nil {
t.Fatal(err)
}
// When
pc2, err := c.ConsumePartition("my_topic", 0, 0)
// Then
if pc2 != nil || err != ConfigurationError("That topic/partition is already being consumed") {
t.Fatal("A partition cannot be consumed twice at the same time")
}
safeClose(t, pc1)
safeClose(t, c)
broker0.Close()
}
// If the consumer fails to refresh metadata it keeps retrying with the
// frequency specified by `Config.Consumer.Retry.Backoff`.
func TestConsumerLeaderRefreshError(t *testing.T) {
// Given
broker0 := newMockBroker(t, 100)
// Stage 1: my_topic/0 served by broker0
Logger.Printf(" STAGE 1")
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": newMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": newMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetOldest, 123).
SetOffset("my_topic", 0, OffsetNewest, 1000),
"FetchRequest": newMockFetchResponse(t, 1).
SetMessage("my_topic", 0, 123, testMsg),
})
config := NewConfig()
config.Net.ReadTimeout = 100 * time.Millisecond
config.Consumer.Retry.Backoff = 200 * time.Millisecond
config.Consumer.Return.Errors = true
config.Metadata.Retry.Max = 0
c, err := NewConsumer([]string{broker0.Addr()}, config)
if err != nil {
t.Fatal(err)
}
pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
if err != nil {
t.Fatal(err)
}
assertMessageOffset(t, <-pc.Messages(), 123)
// Stage 2: broker0 says that it is no longer the leader for my_topic/0,
// but the requests to retrieve metadata fail with network timeout.
Logger.Printf(" STAGE 2")
fetchResponse2 := &FetchResponse{}
fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition)
broker0.SetHandlerByMap(map[string]MockResponse{
"FetchRequest": newMockWrapper(fetchResponse2),
})
if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers {
t.Errorf("Unexpected error: %v", consErr.Err)
}
// Stage 3: finally the metadata returned by broker0 says that broker1 is the
// new leader for my_topic/0. Consumption resumes.
Logger.Printf(" STAGE 3")
broker1 := newMockBroker(t, 101)
broker1.SetHandlerByMap(map[string]MockResponse{
"FetchRequest": newMockFetchResponse(t, 1).
SetMessage("my_topic", 0, 124, testMsg),
})
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": newMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetBroker(broker1.Addr(), broker1.BrokerID()).
SetLeader("my_topic", 0, broker1.BrokerID()),
})
assertMessageOffset(t, <-pc.Messages(), 124)
safeClose(t, pc)
safeClose(t, c)
broker1.Close()
broker0.Close()
}
func TestConsumerInvalidTopic(t *testing.T) {
// Given
broker0 := newMockBroker(t, 100)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": newMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()),
})
c, err := NewConsumer([]string{broker0.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
// When
pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
// Then
if pc != nil || err != ErrUnknownTopicOrPartition {
t.Errorf("Should fail with, err=%v", err)
}
safeClose(t, c)
broker0.Close()
}
// Nothing bad happens if a partition consumer that has no leader assigned at
// the moment is closed.
func TestConsumerClosePartitionWithoutLeader(t *testing.T) {
// Given
broker0 := newMockBroker(t, 100)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": newMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": newMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetOldest, 123).
SetOffset("my_topic", 0, OffsetNewest, 1000),
"FetchRequest": newMockFetchResponse(t, 1).
SetMessage("my_topic", 0, 123, testMsg),
})
config := NewConfig()
config.Net.ReadTimeout = 100 * time.Millisecond
config.Consumer.Retry.Backoff = 100 * time.Millisecond
config.Consumer.Return.Errors = true
config.Metadata.Retry.Max = 0
c, err := NewConsumer([]string{broker0.Addr()}, config)
if err != nil {
t.Fatal(err)
}
pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
if err != nil {
t.Fatal(err)
}
assertMessageOffset(t, <-pc.Messages(), 123)
// broker0 says that it is no longer the leader for my_topic/0, but the
// requests to retrieve metadata fail with network timeout.
fetchResponse2 := &FetchResponse{}
fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition)
broker0.SetHandlerByMap(map[string]MockResponse{
"FetchRequest": newMockWrapper(fetchResponse2),
})
// When
if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers {
t.Errorf("Unexpected error: %v", consErr.Err)
}
// Then: the partition consumer can be closed without any problem.
safeClose(t, pc)
safeClose(t, c)
broker0.Close()
}
// If the initial offset passed on partition consumer creation is out of the
// actual offset range for the partition, then the partition consumer stops
// immediately, closing its output channels.
func TestConsumerShutsDownOutOfRange(t *testing.T) {
// Given
broker0 := newMockBroker(t, 0)
broker0.SetHandler(func(req *request) (res encoder) {
switch reqBody := req.body.(type) {
case *MetadataRequest:
return newMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()).
For(reqBody)
case *OffsetRequest:
return newMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetNewest, 1234).
SetOffset("my_topic", 0, OffsetOldest, 7).
For(reqBody)
case *FetchRequest:
fetchResponse := new(FetchResponse)
fetchResponse.AddError("my_topic", 0, ErrOffsetOutOfRange)
return fetchResponse
}
return nil
})
master, err := NewConsumer([]string{broker0.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
// When
consumer, err := master.ConsumePartition("my_topic", 0, 101)
if err != nil {
t.Fatal(err)
}
// Then: consumer should shut down closing its messages and errors channels.
if _, ok := <-consumer.Messages(); ok {
t.Error("Expected the consumer to shut down")
}
safeClose(t, consumer)
safeClose(t, master)
broker0.Close()
}
// If a fetch response contains messages with offsets that are smaller than
// requested, then such messages are ignored.
func TestConsumerExtraOffsets(t *testing.T) {
// Given
broker0 := newMockBroker(t, 0)
called := 0
broker0.SetHandler(func(req *request) (res encoder) {
switch req.body.(type) {
case *MetadataRequest:
return newMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()).For(req.body)
case *OffsetRequest:
return newMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetNewest, 1234).
SetOffset("my_topic", 0, OffsetOldest, 0).For(req.body)
case *FetchRequest:
fetchResponse := &FetchResponse{}
called++
if called > 1 {
fetchResponse.AddError("my_topic", 0, ErrNoError)
return fetchResponse
}
fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 1)
fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 2)
fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 3)
fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 4)
return fetchResponse
}
return nil
})
master, err := NewConsumer([]string{broker0.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
// When
consumer, err := master.ConsumePartition("my_topic", 0, 3)
if err != nil {
t.Fatal(err)
}
// Then: messages with offsets 1 and 2 are not returned even though they
// are present in the response.
assertMessageOffset(t, <-consumer.Messages(), 3)
assertMessageOffset(t, <-consumer.Messages(), 4)
safeClose(t, consumer)
safeClose(t, master)
broker0.Close()
}
// It is fine if the offsets of fetched messages are not sequential (as long as
// they are strictly increasing).
func TestConsumerNonSequentialOffsets(t *testing.T) {
// Given
broker0 := newMockBroker(t, 0)
called := 0
broker0.SetHandler(func(req *request) (res encoder) {
switch req.body.(type) {
case *MetadataRequest:
return newMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()).For(req.body)
case *OffsetRequest:
return newMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetNewest, 1234).
SetOffset("my_topic", 0, OffsetOldest, 0).For(req.body)
case *FetchRequest:
called++
fetchResponse := &FetchResponse{}
if called > 1 {
fetchResponse.AddError("my_topic", 0, ErrNoError)
return fetchResponse
}
fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 5)
fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 7)
fetchResponse.AddMessage("my_topic", 0, nil, testMsg, 11)
return fetchResponse
}
return nil
})
master, err := NewConsumer([]string{broker0.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
// When
consumer, err := master.ConsumePartition("my_topic", 0, 3)
if err != nil {
t.Fatal(err)
}
// Then: messages with offsets 5, 7 and 11 are all returned even though their
// offsets are not sequential.
assertMessageOffset(t, <-consumer.Messages(), 5)
assertMessageOffset(t, <-consumer.Messages(), 7)
assertMessageOffset(t, <-consumer.Messages(), 11)
safeClose(t, consumer)
safeClose(t, master)
broker0.Close()
}
// If leadership for a partition changes then the consumer resolves the new
// leader and switches to it.
func TestConsumerRebalancingMultiplePartitions(t *testing.T) {
// initial setup
seedBroker := newMockBroker(t, 10)
leader0 := newMockBroker(t, 0)
leader1 := newMockBroker(t, 1)
seedBroker.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": newMockMetadataResponse(t).
SetBroker(leader0.Addr(), leader0.BrokerID()).
SetBroker(leader1.Addr(), leader1.BrokerID()).
SetLeader("my_topic", 0, leader0.BrokerID()).
SetLeader("my_topic", 1, leader1.BrokerID()),
})
mockOffsetResponse1 := newMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetOldest, 0).
SetOffset("my_topic", 0, OffsetNewest, 1000).
SetOffset("my_topic", 1, OffsetOldest, 0).
SetOffset("my_topic", 1, OffsetNewest, 1000)
leader0.SetHandlerByMap(map[string]MockResponse{
"OffsetRequest": mockOffsetResponse1,
"FetchRequest": newMockFetchResponse(t, 1),
})
leader1.SetHandlerByMap(map[string]MockResponse{
"OffsetRequest": mockOffsetResponse1,
"FetchRequest": newMockFetchResponse(t, 1),
})
// launch test goroutines
config := NewConfig()
config.Consumer.Retry.Backoff = 50 * time.Millisecond
master, err := NewConsumer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
// we expect to end up (eventually) consuming exactly ten messages on each partition
var wg sync.WaitGroup
for i := int32(0); i < 2; i++ {
consumer, err := master.ConsumePartition("my_topic", i, 0)
if err != nil {
t.Error(err)
}
go func(c PartitionConsumer) {
for err := range c.Errors() {
t.Error(err)
}
}(consumer)
wg.Add(1)
go func(partition int32, c PartitionConsumer) {
for i := 0; i < 10; i++ {
message := <-c.Messages()
if message.Offset != int64(i) {
t.Error("Incorrect message offset!", i, partition, message.Offset)
}
if message.Partition != partition {
t.Error("Incorrect message partition!")
}
}
safeClose(t, c)
wg.Done()
}(i, consumer)
}
time.Sleep(50 * time.Millisecond)
Logger.Printf(" STAGE 1")
// Stage 1:
// * my_topic/0 -> leader0 serves 4 messages
// * my_topic/1 -> leader1 serves 0 messages
mockFetchResponse := newMockFetchResponse(t, 1)
for i := 0; i < 4; i++ {
mockFetchResponse.SetMessage("my_topic", 0, int64(i), testMsg)
}
leader0.SetHandlerByMap(map[string]MockResponse{
"FetchRequest": mockFetchResponse,
})
time.Sleep(50 * time.Millisecond)
Logger.Printf(" STAGE 2")
// Stage 2:
// * leader0 says that it is no longer serving my_topic/0
// * seedBroker tells that leader1 is serving my_topic/0 now
// seed broker tells that the new partition 0 leader is leader1
seedBroker.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": newMockMetadataResponse(t).
SetLeader("my_topic", 0, leader1.BrokerID()).
SetLeader("my_topic", 1, leader1.BrokerID()),
})
// leader0 says no longer leader of partition 0
leader0.SetHandler(func(req *request) (res encoder) {
switch req.body.(type) {
case *FetchRequest:
fetchResponse := new(FetchResponse)
fetchResponse.AddError("my_topic", 0, ErrNotLeaderForPartition)
return fetchResponse
}
return nil
})
time.Sleep(50 * time.Millisecond)
Logger.Printf(" STAGE 3")
// Stage 3:
// * my_topic/0 -> leader1 serves 3 messages
// * my_topic/1 -> leader1 serves 8 messages
// leader1 provides 3 messages on partition 0, and 8 messages on partition 1
mockFetchResponse2 := newMockFetchResponse(t, 2)
for i := 4; i < 7; i++ {
mockFetchResponse2.SetMessage("my_topic", 0, int64(i), testMsg)
}
for i := 0; i < 8; i++ {
mockFetchResponse2.SetMessage("my_topic", 1, int64(i), testMsg)
}
leader1.SetHandlerByMap(map[string]MockResponse{
"FetchRequest": mockFetchResponse2,
})
time.Sleep(50 * time.Millisecond)
Logger.Printf(" STAGE 4")
// Stage 4:
// * my_topic/0 -> leader1 serves 3 messages
// * my_topic/1 -> leader1 tells that it is no longer the leader
// * seedBroker tells that leader0 is a new leader for my_topic/1
// metadata assigns 0 to leader1 and 1 to leader0
seedBroker.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": newMockMetadataResponse(t).
SetLeader("my_topic", 0, leader1.BrokerID()).
SetLeader("my_topic", 1, leader0.BrokerID()),
})
// leader1 provides three more messages on partition0, says no longer leader of partition1
mockFetchResponse3 := newMockFetchResponse(t, 3).
SetMessage("my_topic", 0, int64(7), testMsg).
SetMessage("my_topic", 0, int64(8), testMsg).
SetMessage("my_topic", 0, int64(9), testMsg)
leader1.SetHandler(func(req *request) (res encoder) {
switch reqBody := req.body.(type) {
case *FetchRequest:
res := mockFetchResponse3.For(reqBody).(*FetchResponse)
res.AddError("my_topic", 1, ErrNotLeaderForPartition)
return res
}
return nil
})
// leader0 provides two messages on partition 1
mockFetchResponse4 := newMockFetchResponse(t, 2)
for i := 8; i < 10; i++ {
mockFetchResponse4.SetMessage("my_topic", 1, int64(i), testMsg)
}
leader0.SetHandlerByMap(map[string]MockResponse{
"FetchRequest": mockFetchResponse4,
})
wg.Wait()
safeClose(t, master)
leader1.Close()
leader0.Close()
seedBroker.Close()
}
// When two partitions have the same broker as the leader, a full channel
// buffer on one partition consumer does not affect the ability to read
// messages from the other consumer.
func TestConsumerInterleavedClose(t *testing.T) {
// Given
broker0 := newMockBroker(t, 0)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": newMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()).
SetLeader("my_topic", 1, broker0.BrokerID()),
"OffsetRequest": newMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetOldest, 1000).
SetOffset("my_topic", 0, OffsetNewest, 1100).
SetOffset("my_topic", 1, OffsetOldest, 2000).
SetOffset("my_topic", 1, OffsetNewest, 2100),
"FetchRequest": newMockFetchResponse(t, 1).
SetMessage("my_topic", 0, 1000, testMsg).
SetMessage("my_topic", 0, 1001, testMsg).
SetMessage("my_topic", 0, 1002, testMsg).
SetMessage("my_topic", 1, 2000, testMsg),
})
config := NewConfig()
config.ChannelBufferSize = 0
master, err := NewConsumer([]string{broker0.Addr()}, config)
if err != nil {
t.Fatal(err)
}
c0, err := master.ConsumePartition("my_topic", 0, 1000)
if err != nil {
t.Fatal(err)
}
c1, err := master.ConsumePartition("my_topic", 1, 2000)
if err != nil {
t.Fatal(err)
}
// When/Then: we can read from partition 0 even if nobody reads from partition 1
assertMessageOffset(t, <-c0.Messages(), 1000)
assertMessageOffset(t, <-c0.Messages(), 1001)
assertMessageOffset(t, <-c0.Messages(), 1002)
safeClose(t, c1)
safeClose(t, c0)
safeClose(t, master)
broker0.Close()
}
func TestConsumerBounceWithReferenceOpen(t *testing.T) {
broker0 := newMockBroker(t, 0)
broker0Addr := broker0.Addr()
broker1 := newMockBroker(t, 1)
mockMetadataResponse := newMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetBroker(broker1.Addr(), broker1.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()).
SetLeader("my_topic", 1, broker1.BrokerID())
mockOffsetResponse := newMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetOldest, 1000).
SetOffset("my_topic", 0, OffsetNewest, 1100).
SetOffset("my_topic", 1, OffsetOldest, 2000).
SetOffset("my_topic", 1, OffsetNewest, 2100)
mockFetchResponse := newMockFetchResponse(t, 1)
for i := 0; i < 10; i++ {
mockFetchResponse.SetMessage("my_topic", 0, int64(1000+i), testMsg)
mockFetchResponse.SetMessage("my_topic", 1, int64(2000+i), testMsg)
}
broker0.SetHandlerByMap(map[string]MockResponse{
"OffsetRequest": mockOffsetResponse,
"FetchRequest": mockFetchResponse,
})
broker1.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": mockMetadataResponse,
"OffsetRequest": mockOffsetResponse,
"FetchRequest": mockFetchResponse,
})
config := NewConfig()
config.Consumer.Return.Errors = true
config.Consumer.Retry.Backoff = 100 * time.Millisecond
config.ChannelBufferSize = 1
master, err := NewConsumer([]string{broker1.Addr()}, config)
if err != nil {
t.Fatal(err)
}
c0, err := master.ConsumePartition("my_topic", 0, 1000)
if err != nil {
t.Fatal(err)
}
c1, err := master.ConsumePartition("my_topic", 1, 2000)
if err != nil {
t.Fatal(err)
}
// read messages from both partitions to make sure that both brokers operate
// normally.
assertMessageOffset(t, <-c0.Messages(), 1000)
assertMessageOffset(t, <-c1.Messages(), 2000)
// Simulate broker shutdown. Note that the metadata response does not change,
// that is, the leadership does not move to another broker, so the partition
// consumer will keep retrying to restore the connection with the broker.
broker0.Close()
// Make sure that while the partition/0 leader is down, consumer/partition/1
// is capable of pulling messages from broker1.
for i := 1; i < 7; i++ {
offset := (<-c1.Messages()).Offset
if offset != int64(2000+i) {
t.Errorf("Expected offset %d from consumer/partition/1", int64(2000+i))
}
}
// Bring broker0 back to service.
broker0 = newMockBrokerAddr(t, 0, broker0Addr)
broker0.SetHandlerByMap(map[string]MockResponse{
"FetchRequest": mockFetchResponse,
})
// Read the rest of messages from both partitions.
for i := 7; i < 10; i++ {
assertMessageOffset(t, <-c1.Messages(), int64(2000+i))
}
for i := 1; i < 10; i++ {
assertMessageOffset(t, <-c0.Messages(), int64(1000+i))
}
select {
case <-c0.Errors():
default:
t.Errorf("Partition consumer should have detected broker restart")
}
safeClose(t, c1)
safeClose(t, c0)
safeClose(t, master)
broker0.Close()
broker1.Close()
}
func TestConsumerOffsetOutOfRange(t *testing.T) {
// Given
broker0 := newMockBroker(t, 2)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": newMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": newMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetNewest, 1234).
SetOffset("my_topic", 0, OffsetOldest, 2345),
})
master, err := NewConsumer([]string{broker0.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
// When/Then
if _, err := master.ConsumePartition("my_topic", 0, 0); err != ErrOffsetOutOfRange {
t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
}
if _, err := master.ConsumePartition("my_topic", 0, 3456); err != ErrOffsetOutOfRange {
t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
}
if _, err := master.ConsumePartition("my_topic", 0, -3); err != ErrOffsetOutOfRange {
t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
}
safeClose(t, master)
broker0.Close()
}
func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) {
if msg.Offset != expectedOffset {
t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset)
}
}


@@ -1,35 +0,0 @@
package sarama
import (
"encoding/binary"
"hash/crc32"
)
// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
type crc32Field struct {
startOffset int
}
func (c *crc32Field) saveOffset(in int) {
c.startOffset = in
}
func (c *crc32Field) reserveLength() int {
return 4
}
func (c *crc32Field) run(curOffset int, buf []byte) error {
crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
return nil
}
func (c *crc32Field) check(curOffset int, buf []byte) error {
crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) {
return PacketDecodingError{"CRC didn't match"}
}
return nil
}


@@ -1,62 +0,0 @@
package sarama
import "fmt"
// Encoder is the interface that wraps the basic Encode method.
// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules.
type encoder interface {
encode(pe packetEncoder) error
}
// Encode takes an Encoder and turns it into bytes.
func encode(e encoder) ([]byte, error) {
if e == nil {
return nil, nil
}
var prepEnc prepEncoder
var realEnc realEncoder
err := e.encode(&prepEnc)
if err != nil {
return nil, err
}
if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
}
realEnc.raw = make([]byte, prepEnc.length)
err = e.encode(&realEnc)
if err != nil {
return nil, err
}
return realEnc.raw, nil
}
// Decoder is the interface that wraps the basic Decode method.
// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules.
type decoder interface {
decode(pd packetDecoder) error
}
// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes,
// interpreted using Kafka's encoding rules.
func decode(buf []byte, in decoder) error {
if buf == nil {
return nil
}
helper := realDecoder{raw: buf}
err := in.decode(&helper)
if err != nil {
return err
}
if helper.off != len(buf) {
return PacketDecodingError{"invalid length"}
}
return nil
}


@@ -1,146 +0,0 @@
package sarama
import (
"errors"
"fmt"
)
// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
// or otherwise failed to respond.
var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
// ErrClosedClient is the error returned when a method is called on a client that has been closed.
var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
// not contain the expected information.
var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
// (meaning one outside of the range [0...numPartitions-1]).
var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
var ErrNotConnected = errors.New("kafka: broker not connected")
// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
// of the message set.
var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
// ErrShuttingDown is returned when a producer receives a message during shutdown.
var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
type PacketEncodingError struct {
Info string
}
func (err PacketEncodingError) Error() string {
return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
}
// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
// This can be a bad CRC or length field, or any other invalid value.
type PacketDecodingError struct {
Info string
}
func (err PacketDecodingError) Error() string {
return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
}
// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
// when the specified configuration is invalid.
type ConfigurationError string
func (err ConfigurationError) Error() string {
return "kafka: invalid configuration (" + string(err) + ")"
}
// KError is the type of error that can be returned directly by the Kafka broker.
// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
type KError int16
// Numeric error codes returned by the Kafka server.
const (
ErrNoError KError = 0
ErrUnknown KError = -1
ErrOffsetOutOfRange KError = 1
ErrInvalidMessage KError = 2
ErrUnknownTopicOrPartition KError = 3
ErrInvalidMessageSize KError = 4
ErrLeaderNotAvailable KError = 5
ErrNotLeaderForPartition KError = 6
ErrRequestTimedOut KError = 7
ErrBrokerNotAvailable KError = 8
ErrReplicaNotAvailable KError = 9
ErrMessageSizeTooLarge KError = 10
ErrStaleControllerEpochCode KError = 11
ErrOffsetMetadataTooLarge KError = 12
ErrOffsetsLoadInProgress KError = 14
ErrConsumerCoordinatorNotAvailable KError = 15
ErrNotCoordinatorForConsumer KError = 16
ErrInvalidTopic KError = 17
ErrMessageSetSizeTooLarge KError = 18
ErrNotEnoughReplicas KError = 19
ErrNotEnoughReplicasAfterAppend KError = 20
)
func (err KError) Error() string {
// Error messages stolen/adapted from
// https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
switch err {
case ErrNoError:
return "kafka server: Not an error, why are you printing me?"
case ErrUnknown:
return "kafka server: Unexpected (unknown?) server error."
case ErrOffsetOutOfRange:
return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
case ErrInvalidMessage:
return "kafka server: Message contents does not match its CRC."
case ErrUnknownTopicOrPartition:
return "kafka server: Request was for a topic or partition that does not exist on this broker."
case ErrInvalidMessageSize:
return "kafka server: The message has a negative size."
case ErrLeaderNotAvailable:
return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
case ErrNotLeaderForPartition:
return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
case ErrRequestTimedOut:
return "kafka server: Request exceeded the user-specified time limit in the request."
case ErrBrokerNotAvailable:
return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
case ErrReplicaNotAvailable:
return "kafka server: Replica infomation not available, one or more brokers are down."
case ErrMessageSizeTooLarge:
return "kafka server: Message was too large, server rejected it to avoid allocation error."
case ErrStaleControllerEpochCode:
return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
case ErrOffsetMetadataTooLarge:
return "kafka server: Specified a string larger than the configured maximum for offset metadata."
case ErrOffsetsLoadInProgress:
return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
case ErrConsumerCoordinatorNotAvailable:
return "kafka server: Offset's topic has not yet been created."
case ErrNotCoordinatorForConsumer:
return "kafka server: Request was for a consumer group that is not coordinated by this broker."
case ErrInvalidTopic:
return "kafka server: The request attempted to perform an operation on an invalid topic."
case ErrMessageSetSizeTooLarge:
return "kafka server: The request included message batch larger than the configured segment size on the server."
case ErrNotEnoughReplicas:
return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
case ErrNotEnoughReplicasAfterAppend:
return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
}
return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
}


@@ -1,9 +0,0 @@
# Sarama examples
This folder contains example applications to demonstrate the use of Sarama. For code snippet examples on how to use the different types in Sarama, see [Sarama's API documentation on godoc.org](https://godoc.org/github.com/Shopify/sarama)
In these examples, we use `github.com/Shopify/sarama` as the import path. We do this to ensure all the examples are up to date with the latest changes in Sarama. For your own applications, you may want to use `gopkg.in/Shopify/sarama.v1` to lock into a stable API version.
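As a minimal sketch of that import choice (a hypothetical throwaway file, not part of the examples), either path can be used, but pick one:

```go
package main

// The examples import Sarama via its GitHub path; an application that wants to
// pin the stable v1 API would use the commented gopkg.in path instead.
import (
	"fmt"

	"github.com/Shopify/sarama"
	// sarama "gopkg.in/Shopify/sarama.v1" // pinned alternative for applications
)

func main() {
	fmt.Println(sarama.NewConfig() != nil) // trivial use so the import compiles
}
```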
#### HTTP server
[http_server](./http_server) is a simple HTTP server that uses the sync producer to produce data as part of the request handling cycle, as well as the async producer to maintain an access log. It also uses the [mocks subpackage](https://godoc.org/github.com/Shopify/sarama/mocks) to test both.


@@ -1,2 +0,0 @@
http_server
http_server.test


@@ -1,7 +0,0 @@
# HTTP server example
This HTTP server example shows you how to use the AsyncProducer and SyncProducer, and how to test them using mocks. The server simply sends the data of the HTTP request's query string to Kafka, and sends a 200 result if that succeeds. For every request, it will also send an access log entry to Kafka in the background.
If you need to know whether a message was successfully sent to the Kafka cluster before you can send your HTTP response, using the `SyncProducer` is probably the simplest way to achieve this. If you don't care, e.g. for the access log, using the `AsyncProducer` lets you fire and forget: you can send the HTTP response while the message is being produced in the background.
One important thing to note is that both the `SyncProducer` and `AsyncProducer` are **thread-safe**. Go's `http.Server` handles requests concurrently in different goroutines, but you can use a single producer safely. This actually yields efficiency gains, as the producer is able to batch messages from concurrent requests together.
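The following is a condensed, illustrative sketch of that trade-off, not the example itself (the full `http_server.go` appears further below); the broker address is a placeholder and error handling is reduced to logging:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	brokers := []string{"localhost:9092"} // placeholder broker address

	// SyncProducer: SendMessage blocks until the broker responds, so the
	// outcome is known before the HTTP response would be written.
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true // some Sarama versions require this for the SyncProducer
	sp, err := sarama.NewSyncProducer(brokers, cfg)
	if err != nil {
		log.Fatalln("sync producer:", err)
	}
	defer sp.Close()
	if _, _, err := sp.SendMessage(&sarama.ProducerMessage{
		Topic: "important",
		Value: sarama.StringEncoder("query-string payload"),
	}); err != nil {
		log.Println("store failed:", err) // the handler would answer with HTTP 500 here
	}

	// AsyncProducer: fire-and-forget via the Input channel; failures only show
	// up later on Errors(), which is acceptable for an access log.
	ap, err := sarama.NewAsyncProducer(brokers, sarama.NewConfig())
	if err != nil {
		log.Fatalln("async producer:", err)
	}
	defer ap.Close()
	go func() {
		for err := range ap.Errors() {
			log.Println("access log write failed:", err)
		}
	}()
	ap.Input() <- &sarama.ProducerMessage{
		Topic: "access_log",
		Value: sarama.StringEncoder("access log entry"),
	}
}
```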


@@ -1,246 +0,0 @@
package main
import (
"github.com/Shopify/sarama"
"crypto/tls"
"crypto/x509"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
"time"
)
var (
addr = flag.String("addr", ":8080", "The address to bind to")
brokers = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The Kafka brokers to connect to, as a comma separated list")
verbose = flag.Bool("verbose", false, "Turn on Sarama logging")
certFile = flag.String("certificate", "", "The optional certificate file for client authentication")
keyFile = flag.String("key", "", "The optional key file for client authentication")
caFile = flag.String("ca", "", "The optional certificate authority file for TLS client authentication")
verifySsl = flag.Bool("verify", false, "Optional verify ssl certificates chain")
)
func main() {
flag.Parse()
if *verbose {
sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
}
if *brokers == "" {
flag.PrintDefaults()
os.Exit(1)
}
brokerList := strings.Split(*brokers, ",")
log.Printf("Kafka brokers: %s", strings.Join(brokerList, ", "))
server := &Server{
DataCollector: newDataCollector(brokerList),
AccessLogProducer: newAccessLogProducer(brokerList),
}
defer func() {
if err := server.Close(); err != nil {
log.Println("Failed to close server", err)
}
}()
log.Fatal(server.Run(*addr))
}
func createTlsConfiguration() (t *tls.Config) {
if *certFile != "" && *keyFile != "" && *caFile != "" {
cert, err := tls.LoadX509KeyPair(*certFile, *keyFile)
if err != nil {
log.Fatal(err)
}
caCert, err := ioutil.ReadFile(*caFile)
if err != nil {
log.Fatal(err)
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
t = &tls.Config{
Certificates: []tls.Certificate{cert},
RootCAs: caCertPool,
InsecureSkipVerify: *verifySsl,
}
}
// will be nil by default if nothing is provided
return t
}
type Server struct {
DataCollector sarama.SyncProducer
AccessLogProducer sarama.AsyncProducer
}
func (s *Server) Close() error {
if err := s.DataCollector.Close(); err != nil {
log.Println("Failed to shut down data collector cleanly", err)
}
if err := s.AccessLogProducer.Close(); err != nil {
log.Println("Failed to shut down access log producer cleanly", err)
}
return nil
}
func (s *Server) Handler() http.Handler {
return s.withAccessLog(s.collectQueryStringData())
}
func (s *Server) Run(addr string) error {
httpServer := &http.Server{
Addr: addr,
Handler: s.Handler(),
}
log.Printf("Listening for requests on %s...\n", addr)
return httpServer.ListenAndServe()
}
func (s *Server) collectQueryStringData() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
http.NotFound(w, r)
return
}
// We are not setting a message key, which means that all messages will
// be distributed randomly over the different partitions.
partition, offset, err := s.DataCollector.SendMessage(&sarama.ProducerMessage{
Topic: "important",
Value: sarama.StringEncoder(r.URL.RawQuery),
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Failed to store your data:, %s", err)
} else {
// The tuple (topic, partition, offset) can be used as a unique identifier
// for a message in a Kafka cluster.
fmt.Fprintf(w, "Your data is stored with unique identifier important/%d/%d", partition, offset)
}
})
}
type accessLogEntry struct {
Method string `json:"method"`
Host string `json:"host"`
Path string `json:"path"`
IP string `json:"ip"`
ResponseTime float64 `json:"response_time"`
encoded []byte
err error
}
func (ale *accessLogEntry) ensureEncoded() {
if ale.encoded == nil && ale.err == nil {
ale.encoded, ale.err = json.Marshal(ale)
}
}
func (ale *accessLogEntry) Length() int {
ale.ensureEncoded()
return len(ale.encoded)
}
func (ale *accessLogEntry) Encode() ([]byte, error) {
ale.ensureEncoded()
return ale.encoded, ale.err
}
func (s *Server) withAccessLog(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
started := time.Now()
next.ServeHTTP(w, r)
entry := &accessLogEntry{
Method: r.Method,
Host: r.Host,
Path: r.RequestURI,
IP: r.RemoteAddr,
ResponseTime: float64(time.Since(started)) / float64(time.Second),
}
// We will use the client's IP address as key. This will cause
// all the access log entries of the same IP address to end up
// on the same partition.
s.AccessLogProducer.Input() <- &sarama.ProducerMessage{
Topic: "access_log",
Key: sarama.StringEncoder(r.RemoteAddr),
Value: entry,
}
})
}
func newDataCollector(brokerList []string) sarama.SyncProducer {
// For the data collector, we are looking for strong consistency semantics.
// Because we don't change the flush settings, sarama will try to produce messages
// as fast as possible to keep latency low.
config := sarama.NewConfig()
config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message
config.Producer.Retry.Max = 10 // Retry up to 10 times to produce the message
tlsConfig := createTlsConfiguration()
if tlsConfig != nil {
config.Net.TLS.Config = tlsConfig
config.Net.TLS.Enable = true
}
// On the broker side, you may want to change the following settings to get
// stronger consistency guarantees:
// - For your broker, set `unclean.leader.election.enable` to false
// - For the topic, you could increase `min.insync.replicas`.
producer, err := sarama.NewSyncProducer(brokerList, config)
if err != nil {
log.Fatalln("Failed to start Sarama producer:", err)
}
return producer
}
func newAccessLogProducer(brokerList []string) sarama.AsyncProducer {
// For the access log, we are looking for AP semantics, with high throughput.
// By creating batches of compressed messages, we reduce network I/O at the cost of more latency.
config := sarama.NewConfig()
tlsConfig := createTlsConfiguration()
if tlsConfig != nil {
config.Net.TLS.Enable = true
config.Net.TLS.Config = tlsConfig
}
config.Producer.RequiredAcks = sarama.WaitForLocal // Only wait for the leader to ack
config.Producer.Compression = sarama.CompressionSnappy // Compress messages
config.Producer.Flush.Frequency = 500 * time.Millisecond // Flush batches every 500ms
producer, err := sarama.NewAsyncProducer(brokerList, config)
if err != nil {
log.Fatalln("Failed to start Sarama producer:", err)
}
// We will just log to STDOUT if we're not able to produce messages.
// Note: messages will only be returned here after all retry attempts are exhausted.
go func() {
for err := range producer.Errors() {
log.Println("Failed to write access log entry:", err)
}
}()
return producer
}


@@ -1,109 +0,0 @@
package main
import (
"io"
"net/http"
"net/http/httptest"
"testing"
"github.com/Shopify/sarama"
"github.com/Shopify/sarama/mocks"
)
// In normal operation, we expect one access log entry,
// and one data collector entry. Let's assume both will succeed.
// We should return an HTTP 200 status.
func TestCollectSuccessfully(t *testing.T) {
dataCollectorMock := mocks.NewSyncProducer(t, nil)
dataCollectorMock.ExpectSendMessageAndSucceed()
accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
accessLogProducerMock.ExpectInputAndSucceed()
// Now, use dependency injection to use the mocks.
s := &Server{
DataCollector: dataCollectorMock,
AccessLogProducer: accessLogProducerMock,
}
// The Server's Close call is important; it will call Close on
// the two mock producers, which will then validate whether all
// expectations are resolved.
defer safeClose(t, s)
req, err := http.NewRequest("GET", "http://example.com/?data", nil)
if err != nil {
t.Fatal(err)
}
res := httptest.NewRecorder()
s.Handler().ServeHTTP(res, req)
if res.Code != 200 {
t.Errorf("Expected HTTP status 200, found %d", res.Code)
}
if string(res.Body.Bytes()) != "Your data is stored with unique identifier important/0/1" {
t.Error("Unexpected response body", res.Body)
}
}
// Now, let's see if we handle the case of not being able to produce
// to the data collector properly. In this case we should return a 500 status.
func TestCollectionFailure(t *testing.T) {
dataCollectorMock := mocks.NewSyncProducer(t, nil)
dataCollectorMock.ExpectSendMessageAndFail(sarama.ErrRequestTimedOut)
accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
accessLogProducerMock.ExpectInputAndSucceed()
s := &Server{
DataCollector: dataCollectorMock,
AccessLogProducer: accessLogProducerMock,
}
defer safeClose(t, s)
req, err := http.NewRequest("GET", "http://example.com/?data", nil)
if err != nil {
t.Fatal(err)
}
res := httptest.NewRecorder()
s.Handler().ServeHTTP(res, req)
if res.Code != 500 {
t.Errorf("Expected HTTP status 500, found %d", res.Code)
}
}
// We don't expect any data collector calls because the path is wrong,
// so we are not setting any expectations on the dataCollectorMock. It
// will still generate an access log entry though.
func TestWrongPath(t *testing.T) {
dataCollectorMock := mocks.NewSyncProducer(t, nil)
accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
accessLogProducerMock.ExpectInputAndSucceed()
s := &Server{
DataCollector: dataCollectorMock,
AccessLogProducer: accessLogProducerMock,
}
defer safeClose(t, s)
req, err := http.NewRequest("GET", "http://example.com/wrong?data", nil)
if err != nil {
t.Fatal(err)
}
res := httptest.NewRecorder()
s.Handler().ServeHTTP(res, req)
if res.Code != 404 {
t.Errorf("Expected HTTP status 404, found %d", res.Code)
}
}
func safeClose(t *testing.T, o io.Closer) {
if err := o.Close(); err != nil {
t.Error(err)
}
}


@@ -1,123 +0,0 @@
package sarama
type fetchRequestBlock struct {
fetchOffset int64
maxBytes int32
}
func (f *fetchRequestBlock) encode(pe packetEncoder) error {
pe.putInt64(f.fetchOffset)
pe.putInt32(f.maxBytes)
return nil
}
func (f *fetchRequestBlock) decode(pd packetDecoder) (err error) {
if f.fetchOffset, err = pd.getInt64(); err != nil {
return err
}
if f.maxBytes, err = pd.getInt32(); err != nil {
return err
}
return nil
}
type FetchRequest struct {
MaxWaitTime int32
MinBytes int32
blocks map[string]map[int32]*fetchRequestBlock
}
func (f *FetchRequest) encode(pe packetEncoder) (err error) {
pe.putInt32(-1) // replica ID is always -1 for clients
pe.putInt32(f.MaxWaitTime)
pe.putInt32(f.MinBytes)
err = pe.putArrayLength(len(f.blocks))
if err != nil {
return err
}
for topic, blocks := range f.blocks {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(blocks))
if err != nil {
return err
}
for partition, block := range blocks {
pe.putInt32(partition)
err = block.encode(pe)
if err != nil {
return err
}
}
}
return nil
}
func (f *FetchRequest) decode(pd packetDecoder) (err error) {
if _, err = pd.getInt32(); err != nil {
return err
}
if f.MaxWaitTime, err = pd.getInt32(); err != nil {
return err
}
if f.MinBytes, err = pd.getInt32(); err != nil {
return err
}
topicCount, err := pd.getArrayLength()
if err != nil {
return err
}
if topicCount == 0 {
return nil
}
f.blocks = make(map[string]map[int32]*fetchRequestBlock)
for i := 0; i < topicCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
f.blocks[topic] = make(map[int32]*fetchRequestBlock)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
fetchBlock := &fetchRequestBlock{}
if err = fetchBlock.decode(pd); err != nil {
return err
}
f.blocks[topic][partition] = fetchBlock
}
}
return nil
}
func (f *FetchRequest) key() int16 {
return 1
}
func (f *FetchRequest) version() int16 {
return 0
}
func (f *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
if f.blocks == nil {
f.blocks = make(map[string]map[int32]*fetchRequestBlock)
}
if f.blocks[topic] == nil {
f.blocks[topic] = make(map[int32]*fetchRequestBlock)
}
tmp := new(fetchRequestBlock)
tmp.maxBytes = maxBytes
tmp.fetchOffset = fetchOffset
f.blocks[topic][partitionID] = tmp
}


@@ -1,34 +0,0 @@
package sarama
import "testing"
var (
fetchRequestNoBlocks = []byte{
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00}
fetchRequestWithProperties = []byte{
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xEF,
0x00, 0x00, 0x00, 0x00}
fetchRequestOneBlock = []byte{
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01,
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x56}
)
func TestFetchRequest(t *testing.T) {
request := new(FetchRequest)
testRequest(t, "no blocks", request, fetchRequestNoBlocks)
request.MaxWaitTime = 0x20
request.MinBytes = 0xEF
testRequest(t, "with properties", request, fetchRequestWithProperties)
request.MaxWaitTime = 0
request.MinBytes = 0
request.AddBlock("topic", 0x12, 0x34, 0x56)
testRequest(t, "one block", request, fetchRequestOneBlock)
}


@@ -1,173 +0,0 @@
package sarama
type FetchResponseBlock struct {
Err KError
HighWaterMarkOffset int64
MsgSet MessageSet
}
func (pr *FetchResponseBlock) decode(pd packetDecoder) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
pr.Err = KError(tmp)
pr.HighWaterMarkOffset, err = pd.getInt64()
if err != nil {
return err
}
msgSetSize, err := pd.getInt32()
if err != nil {
return err
}
msgSetDecoder, err := pd.getSubset(int(msgSetSize))
if err != nil {
return err
}
err = (&pr.MsgSet).decode(msgSetDecoder)
return err
}
type FetchResponse struct {
Blocks map[string]map[int32]*FetchResponseBlock
}
func (pr *FetchResponseBlock) encode(pe packetEncoder) (err error) {
pe.putInt16(int16(pr.Err))
pe.putInt64(pr.HighWaterMarkOffset)
pe.push(&lengthField{})
err = pr.MsgSet.encode(pe)
if err != nil {
return err
}
return pe.pop()
}
func (fr *FetchResponse) decode(pd packetDecoder) (err error) {
numTopics, err := pd.getArrayLength()
if err != nil {
return err
}
fr.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
fr.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
for j := 0; j < numBlocks; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
block := new(FetchResponseBlock)
err = block.decode(pd)
if err != nil {
return err
}
fr.Blocks[name][id] = block
}
}
return nil
}
func (fr *FetchResponse) encode(pe packetEncoder) (err error) {
err = pe.putArrayLength(len(fr.Blocks))
if err != nil {
return err
}
for topic, partitions := range fr.Blocks {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(partitions))
if err != nil {
return err
}
for id, block := range partitions {
pe.putInt32(id)
err = block.encode(pe)
if err != nil {
return err
}
}
}
return nil
}
func (fr *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
if fr.Blocks == nil {
return nil
}
if fr.Blocks[topic] == nil {
return nil
}
return fr.Blocks[topic][partition]
}
func (fr *FetchResponse) AddError(topic string, partition int32, err KError) {
if fr.Blocks == nil {
fr.Blocks = make(map[string]map[int32]*FetchResponseBlock)
}
partitions, ok := fr.Blocks[topic]
if !ok {
partitions = make(map[int32]*FetchResponseBlock)
fr.Blocks[topic] = partitions
}
frb, ok := partitions[partition]
if !ok {
frb = new(FetchResponseBlock)
partitions[partition] = frb
}
frb.Err = err
}
func (fr *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
if fr.Blocks == nil {
fr.Blocks = make(map[string]map[int32]*FetchResponseBlock)
}
partitions, ok := fr.Blocks[topic]
if !ok {
partitions = make(map[int32]*FetchResponseBlock)
fr.Blocks[topic] = partitions
}
frb, ok := partitions[partition]
if !ok {
frb = new(FetchResponseBlock)
partitions[partition] = frb
}
var kb []byte
var vb []byte
if key != nil {
kb, _ = key.Encode()
}
if value != nil {
vb, _ = value.Encode()
}
msg := &Message{Key: kb, Value: vb}
msgBlock := &MessageBlock{Msg: msg, Offset: offset}
frb.MsgSet.Messages = append(frb.MsgSet.Messages, msgBlock)
}


@@ -1,84 +0,0 @@
package sarama
import (
"bytes"
"testing"
)
var (
emptyFetchResponse = []byte{
0x00, 0x00, 0x00, 0x00}
oneMessageFetchResponse = []byte{
0x00, 0x00, 0x00, 0x01,
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x05,
0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10,
0x00, 0x00, 0x00, 0x1C,
// messageSet
0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00,
0x00, 0x00, 0x00, 0x10,
// message
0x23, 0x96, 0x4a, 0xf7, // CRC
0x00,
0x00,
0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x02, 0x00, 0xEE}
)
func TestEmptyFetchResponse(t *testing.T) {
response := FetchResponse{}
testDecodable(t, "empty", &response, emptyFetchResponse)
if len(response.Blocks) != 0 {
t.Error("Decoding produced topic blocks where there were none.")
}
}
func TestOneMessageFetchResponse(t *testing.T) {
response := FetchResponse{}
testDecodable(t, "one message", &response, oneMessageFetchResponse)
if len(response.Blocks) != 1 {
t.Fatal("Decoding produced incorrect number of topic blocks.")
}
if len(response.Blocks["topic"]) != 1 {
t.Fatal("Decoding produced incorrect number of partition blocks for topic.")
}
block := response.GetBlock("topic", 5)
if block == nil {
t.Fatal("GetBlock didn't return block.")
}
if block.Err != ErrOffsetOutOfRange {
t.Error("Decoding didn't produce correct error code.")
}
if block.HighWaterMarkOffset != 0x10101010 {
t.Error("Decoding didn't produce correct high water mark offset.")
}
if block.MsgSet.PartialTrailingMessage {
t.Error("Decoding detected a partial trailing message where there wasn't one.")
}
if len(block.MsgSet.Messages) != 1 {
t.Fatal("Decoding produced incorrect number of messages.")
}
msgBlock := block.MsgSet.Messages[0]
if msgBlock.Offset != 0x550000 {
t.Error("Decoding produced incorrect message offset.")
}
msg := msgBlock.Msg
if msg.Codec != CompressionNone {
t.Error("Decoding produced incorrect message compression.")
}
if msg.Key != nil {
t.Error("Decoding produced message key where there was none.")
}
if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) {
t.Error("Decoding produced incorrect message value.")
}
}


@@ -1,90 +0,0 @@
package sarama
import (
"fmt"
"testing"
"time"
)
func TestFuncConnectionFailure(t *testing.T) {
setupFunctionalTest(t)
defer teardownFunctionalTest(t)
Proxies["kafka1"].Enabled = false
SaveProxy(t, "kafka1")
config := NewConfig()
config.Metadata.Retry.Max = 1
_, err := NewClient([]string{kafkaBrokers[0]}, config)
if err != ErrOutOfBrokers {
t.Fatal("Expected returned error to be ErrOutOfBrokers, but was: ", err)
}
}
func TestFuncClientMetadata(t *testing.T) {
setupFunctionalTest(t)
defer teardownFunctionalTest(t)
config := NewConfig()
config.Metadata.Retry.Max = 1
config.Metadata.Retry.Backoff = 10 * time.Millisecond
client, err := NewClient(kafkaBrokers, config)
if err != nil {
t.Fatal(err)
}
if err := client.RefreshMetadata("unknown_topic"); err != ErrUnknownTopicOrPartition {
t.Error("Expected ErrUnknownTopicOrPartition, got", err)
}
if _, err := client.Leader("unknown_topic", 0); err != ErrUnknownTopicOrPartition {
t.Error("Expected ErrUnknownTopicOrPartition, got", err)
}
if _, err := client.Replicas("invalid/topic", 0); err != ErrUnknownTopicOrPartition {
t.Error("Expected ErrUnknownTopicOrPartition, got", err)
}
partitions, err := client.Partitions("test.4")
if err != nil {
t.Error(err)
}
if len(partitions) != 4 {
t.Errorf("Expected test.4 topic to have 4 partitions, found %v", partitions)
}
partitions, err = client.Partitions("test.1")
if err != nil {
t.Error(err)
}
if len(partitions) != 1 {
t.Errorf("Expected test.1 topic to have 1 partitions, found %v", partitions)
}
safeClose(t, client)
}
func TestFuncClientCoordinator(t *testing.T) {
checkKafkaVersion(t, "0.8.2")
setupFunctionalTest(t)
defer teardownFunctionalTest(t)
client, err := NewClient(kafkaBrokers, nil)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
broker, err := client.Coordinator(fmt.Sprintf("another_new_consumer_group_%d", i))
if err != nil {
t.Error(err)
}
if connected, err := broker.Connected(); !connected || err != nil {
t.Errorf("Expected to coordinator %s broker to be properly connected.", broker.Addr())
}
}
safeClose(t, client)
}


@@ -1,61 +0,0 @@
package sarama
import (
"math"
"testing"
)
func TestFuncConsumerOffsetOutOfRange(t *testing.T) {
setupFunctionalTest(t)
defer teardownFunctionalTest(t)
consumer, err := NewConsumer(kafkaBrokers, nil)
if err != nil {
t.Fatal(err)
}
if _, err := consumer.ConsumePartition("test.1", 0, -10); err != ErrOffsetOutOfRange {
t.Error("Expected ErrOffsetOutOfRange, got:", err)
}
if _, err := consumer.ConsumePartition("test.1", 0, math.MaxInt64); err != ErrOffsetOutOfRange {
t.Error("Expected ErrOffsetOutOfRange, got:", err)
}
safeClose(t, consumer)
}
func TestConsumerHighWaterMarkOffset(t *testing.T) {
setupFunctionalTest(t)
defer teardownFunctionalTest(t)
p, err := NewSyncProducer(kafkaBrokers, nil)
if err != nil {
t.Fatal(err)
}
defer safeClose(t, p)
_, offset, err := p.SendMessage(&ProducerMessage{Topic: "test.1", Value: StringEncoder("Test")})
if err != nil {
t.Fatal(err)
}
c, err := NewConsumer(kafkaBrokers, nil)
if err != nil {
t.Fatal(err)
}
defer safeClose(t, c)
pc, err := c.ConsumePartition("test.1", 0, OffsetOldest)
if err != nil {
t.Fatal(err)
}
<-pc.Messages()
if hwmo := pc.HighWaterMarkOffset(); hwmo != offset+1 {
t.Logf("Last produced offset %d; high water mark should be one higher but found %d.", offset, hwmo)
}
safeClose(t, pc)
}


@@ -1,203 +0,0 @@
package sarama
import (
"fmt"
"sync"
"testing"
"time"
)
const TestBatchSize = 1000
func TestFuncProducing(t *testing.T) {
config := NewConfig()
testProducingMessages(t, config)
}
func TestFuncProducingGzip(t *testing.T) {
config := NewConfig()
config.Producer.Compression = CompressionGZIP
testProducingMessages(t, config)
}
func TestFuncProducingSnappy(t *testing.T) {
config := NewConfig()
config.Producer.Compression = CompressionSnappy
testProducingMessages(t, config)
}
func TestFuncProducingNoResponse(t *testing.T) {
config := NewConfig()
config.Producer.RequiredAcks = NoResponse
testProducingMessages(t, config)
}
func TestFuncProducingFlushing(t *testing.T) {
config := NewConfig()
config.Producer.Flush.Messages = TestBatchSize / 8
config.Producer.Flush.Frequency = 250 * time.Millisecond
testProducingMessages(t, config)
}
func TestFuncMultiPartitionProduce(t *testing.T) {
setupFunctionalTest(t)
defer teardownFunctionalTest(t)
config := NewConfig()
config.ChannelBufferSize = 20
config.Producer.Flush.Frequency = 50 * time.Millisecond
config.Producer.Flush.Messages = 200
config.Producer.Return.Successes = true
producer, err := NewSyncProducer(kafkaBrokers, config)
if err != nil {
t.Fatal(err)
}
var wg sync.WaitGroup
wg.Add(TestBatchSize)
for i := 1; i <= TestBatchSize; i++ {
go func(i int) {
defer wg.Done()
msg := &ProducerMessage{Topic: "test.64", Key: nil, Value: StringEncoder(fmt.Sprintf("hur %d", i))}
if _, _, err := producer.SendMessage(msg); err != nil {
t.Error(i, err)
}
}(i)
}
wg.Wait()
if err := producer.Close(); err != nil {
t.Error(err)
}
}
func TestFuncProducingToInvalidTopic(t *testing.T) {
setupFunctionalTest(t)
defer teardownFunctionalTest(t)
producer, err := NewSyncProducer(kafkaBrokers, nil)
if err != nil {
t.Fatal(err)
}
if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); err != ErrUnknownTopicOrPartition {
t.Error("Expected ErrUnknownTopicOrPartition, found", err)
}
if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); err != ErrUnknownTopicOrPartition {
t.Error("Expected ErrUnknownTopicOrPartition, found", err)
}
safeClose(t, producer)
}
func testProducingMessages(t *testing.T, config *Config) {
setupFunctionalTest(t)
defer teardownFunctionalTest(t)
config.Producer.Return.Successes = true
config.Consumer.Return.Errors = true
client, err := NewClient(kafkaBrokers, config)
if err != nil {
t.Fatal(err)
}
master, err := NewConsumerFromClient(client)
if err != nil {
t.Fatal(err)
}
consumer, err := master.ConsumePartition("test.1", 0, OffsetNewest)
if err != nil {
t.Fatal(err)
}
producer, err := NewAsyncProducerFromClient(client)
if err != nil {
t.Fatal(err)
}
expectedResponses := TestBatchSize
for i := 1; i <= TestBatchSize; {
msg := &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder(fmt.Sprintf("testing %d", i))}
select {
case producer.Input() <- msg:
i++
case ret := <-producer.Errors():
t.Fatal(ret.Err)
case <-producer.Successes():
expectedResponses--
}
}
for expectedResponses > 0 {
select {
case ret := <-producer.Errors():
t.Fatal(ret.Err)
case <-producer.Successes():
expectedResponses--
}
}
safeClose(t, producer)
for i := 1; i <= TestBatchSize; i++ {
select {
case <-time.After(10 * time.Second):
t.Fatal("Not received any more events in the last 10 seconds.")
case err := <-consumer.Errors():
t.Error(err)
case message := <-consumer.Messages():
if string(message.Value) != fmt.Sprintf("testing %d", i) {
t.Fatalf("Unexpected message with index %d: %s", i, message.Value)
}
}
}
safeClose(t, consumer)
safeClose(t, client)
}
// Benchmarks
func BenchmarkProducerSmall(b *testing.B) {
benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 128)))
}
func BenchmarkProducerMedium(b *testing.B) {
benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 1024)))
}
func BenchmarkProducerLarge(b *testing.B) {
benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 8192)))
}
func BenchmarkProducerSmallSinglePartition(b *testing.B) {
benchmarkProducer(b, nil, "test.1", ByteEncoder(make([]byte, 128)))
}
func BenchmarkProducerMediumSnappy(b *testing.B) {
conf := NewConfig()
conf.Producer.Compression = CompressionSnappy
benchmarkProducer(b, conf, "test.1", ByteEncoder(make([]byte, 1024)))
}
func benchmarkProducer(b *testing.B, conf *Config, topic string, value Encoder) {
setupFunctionalTest(b)
defer teardownFunctionalTest(b)
producer, err := NewAsyncProducer(kafkaBrokers, conf)
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
for i := 1; i <= b.N; {
msg := &ProducerMessage{Topic: topic, Key: StringEncoder(fmt.Sprintf("%d", i)), Value: value}
select {
case producer.Input() <- msg:
i++
case ret := <-producer.Errors():
b.Fatal(ret.Err)
}
}
safeClose(b, producer)
}


@@ -1,146 +0,0 @@
package sarama
import (
"log"
"math/rand"
"net"
"os"
"strconv"
"strings"
"testing"
"time"
toxiproxy "github.com/Shopify/toxiproxy/client"
)
const (
VagrantToxiproxy = "http://192.168.100.67:8474"
VagrantKafkaPeers = "192.168.100.67:9091,192.168.100.67:9092,192.168.100.67:9093,192.168.100.67:9094,192.168.100.67:9095"
VagrantZookeeperPeers = "192.168.100.67:2181,192.168.100.67:2182,192.168.100.67:2183,192.168.100.67:2184,192.168.100.67:2185"
)
var (
kafkaAvailable, kafkaRequired bool
kafkaBrokers []string
proxyClient *toxiproxy.Client
Proxies map[string]*toxiproxy.Proxy
ZKProxies = []string{"zk1", "zk2", "zk3", "zk4", "zk5"}
KafkaProxies = []string{"kafka1", "kafka2", "kafka3", "kafka4", "kafka5"}
)
func init() {
if os.Getenv("DEBUG") == "true" {
Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
}
seed := time.Now().UTC().UnixNano()
if tmp := os.Getenv("TEST_SEED"); tmp != "" {
seed, _ = strconv.ParseInt(tmp, 0, 64)
}
Logger.Println("Using random seed:", seed)
rand.Seed(seed)
proxyAddr := os.Getenv("TOXIPROXY_ADDR")
if proxyAddr == "" {
proxyAddr = VagrantToxiproxy
}
proxyClient = toxiproxy.NewClient(proxyAddr)
kafkaPeers := os.Getenv("KAFKA_PEERS")
if kafkaPeers == "" {
kafkaPeers = VagrantKafkaPeers
}
kafkaBrokers = strings.Split(kafkaPeers, ",")
if c, err := net.DialTimeout("tcp", kafkaBrokers[0], 5*time.Second); err == nil {
if err = c.Close(); err == nil {
kafkaAvailable = true
}
}
kafkaRequired = os.Getenv("CI") != ""
}
func checkKafkaAvailability(t testing.TB) {
if !kafkaAvailable {
if kafkaRequired {
t.Fatalf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka on a different location.", kafkaBrokers[0])
} else {
t.Skipf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka on a different location.", kafkaBrokers[0])
}
}
}
func checkKafkaVersion(t testing.TB, requiredVersion string) {
kafkaVersion := os.Getenv("KAFKA_VERSION")
if kafkaVersion == "" {
t.Logf("No KAFKA_VERSION set. This tests requires Kafka version %s or higher. Continuing...", requiredVersion)
} else {
available := parseKafkaVersion(kafkaVersion)
required := parseKafkaVersion(requiredVersion)
if !available.satisfies(required) {
t.Skipf("Kafka version %s is required for this test; you have %s. Skipping...", requiredVersion, kafkaVersion)
}
}
}
func resetProxies(t testing.TB) {
if err := proxyClient.ResetState(); err != nil {
t.Error(err)
}
Proxies = nil
}
func fetchProxies(t testing.TB) {
var err error
Proxies, err = proxyClient.Proxies()
if err != nil {
t.Fatal(err)
}
}
func SaveProxy(t *testing.T, px string) {
if err := Proxies[px].Save(); err != nil {
t.Fatal(err)
}
}
func setupFunctionalTest(t testing.TB) {
checkKafkaAvailability(t)
resetProxies(t)
fetchProxies(t)
}
func teardownFunctionalTest(t testing.TB) {
resetProxies(t)
}
type kafkaVersion []int
func (kv kafkaVersion) satisfies(other kafkaVersion) bool {
var ov int
for index, v := range kv {
if len(other) <= index {
ov = 0
} else {
ov = other[index]
}
if v < ov {
return false
}
}
return true
}
func parseKafkaVersion(version string) kafkaVersion {
numbers := strings.Split(version, ".")
result := make(kafkaVersion, 0, len(numbers))
for _, number := range numbers {
nr, _ := strconv.Atoi(number)
result = append(result, nr)
}
return result
}
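
For orientation, a functional test typically wires these helpers together as sketched below; the test name and the required version string are hypothetical and only illustrate the pattern.

```
// Hypothetical test name and version requirement, for illustration only.
func TestFuncSomething(t *testing.T) {
	checkKafkaVersion(t, "0.9.0.0") // skips when KAFKA_VERSION is set but too old
	setupFunctionalTest(t)          // checks broker availability and resets the toxiproxy proxies
	defer teardownFunctionalTest(t)
	// ... exercise the client against the live cluster ...
}
```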


@@ -1,29 +0,0 @@
package sarama
import "encoding/binary"
// lengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths.
type lengthField struct {
startOffset int
}
func (l *lengthField) saveOffset(in int) {
l.startOffset = in
}
func (l *lengthField) reserveLength() int {
return 4
}
func (l *lengthField) run(curOffset int, buf []byte) error {
binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
return nil
}
func (l *lengthField) check(curOffset int, buf []byte) error {
if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) {
return PacketDecodingError{"length field invalid"}
}
return nil
}
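
For illustration, a throwaway helper like the following (hypothetical, relying only on the code above and the encoding/binary import already in this file) shows the arithmetic that run() performs.

```
// Illustrative only: exercises run() with arbitrary values.
func exampleLengthField() uint32 {
	buf := make([]byte, 10)
	lf := &lengthField{}
	lf.saveOffset(0)
	_ = lf.run(10, buf)                     // writes uint32(10 - 0 - 4) == 6 at buf[0:4]
	return binary.BigEndian.Uint32(buf[:4]) // 6: the payload bytes after the 4-byte prefix
}
```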


@@ -1,154 +0,0 @@
package sarama
import (
"bytes"
"compress/gzip"
"fmt"
"io/ioutil"
)
// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
type CompressionCodec int8
// only the last two bits are really used
const compressionCodecMask int8 = 0x03
const (
CompressionNone CompressionCodec = 0
CompressionGZIP CompressionCodec = 1
CompressionSnappy CompressionCodec = 2
)
// The spec just says: "This is a version id used to allow backwards compatible evolution of the message
// binary format." but it doesn't say what the current value is, so presumably 0...
const messageFormat int8 = 0
type Message struct {
Codec CompressionCodec // codec used to compress the message contents
Key []byte // the message key, may be nil
Value []byte // the message contents
Set *MessageSet // the message set a message might wrap
compressedCache []byte
}
func (m *Message) encode(pe packetEncoder) error {
pe.push(&crc32Field{})
pe.putInt8(messageFormat)
attributes := int8(m.Codec) & compressionCodecMask
pe.putInt8(attributes)
err := pe.putBytes(m.Key)
if err != nil {
return err
}
var payload []byte
if m.compressedCache != nil {
payload = m.compressedCache
m.compressedCache = nil
} else {
switch m.Codec {
case CompressionNone:
payload = m.Value
case CompressionGZIP:
var buf bytes.Buffer
writer := gzip.NewWriter(&buf)
if _, err = writer.Write(m.Value); err != nil {
return err
}
if err = writer.Close(); err != nil {
return err
}
m.compressedCache = buf.Bytes()
payload = m.compressedCache
case CompressionSnappy:
tmp := snappyEncode(m.Value)
m.compressedCache = tmp
payload = m.compressedCache
default:
return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)}
}
}
if err = pe.putBytes(payload); err != nil {
return err
}
return pe.pop()
}
func (m *Message) decode(pd packetDecoder) (err error) {
err = pd.push(&crc32Field{})
if err != nil {
return err
}
format, err := pd.getInt8()
if err != nil {
return err
}
if format != messageFormat {
return PacketDecodingError{"unexpected messageFormat"}
}
attribute, err := pd.getInt8()
if err != nil {
return err
}
m.Codec = CompressionCodec(attribute & compressionCodecMask)
m.Key, err = pd.getBytes()
if err != nil {
return err
}
m.Value, err = pd.getBytes()
if err != nil {
return err
}
switch m.Codec {
case CompressionNone:
// nothing to do
case CompressionGZIP:
if m.Value == nil {
return PacketDecodingError{"GZIP compression specified, but no data to uncompress"}
}
reader, err := gzip.NewReader(bytes.NewReader(m.Value))
if err != nil {
return err
}
if m.Value, err = ioutil.ReadAll(reader); err != nil {
return err
}
return m.decodeSet()
case CompressionSnappy:
if m.Value == nil {
return PacketDecodingError{"Snappy compression specified, but no data to uncompress"}
}
if m.Value, err = snappyDecode(m.Value); err != nil {
return err
}
return m.decodeSet()
default:
return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)}
}
err = pd.pop()
if err != nil {
return err
}
return nil
}
// decodes a message set from a previously encoded bulk message
func (m *Message) decodeSet() (err error) {
pd := realDecoder{raw: m.Value}
m.Set = &MessageSet{}
return m.Set.decode(&pd)
}
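
A short, hypothetical helper sketching the attribute-byte packing that encode and decode above rely on; it uses only identifiers defined in this file.

```
// Illustrative only: the codec is stored in the low two bits of the attribute byte.
func exampleMessageAttributes() CompressionCodec {
	attributes := int8(CompressionSnappy) & compressionCodecMask // == 0x02
	return CompressionCodec(attributes & compressionCodecMask)   // recovers CompressionSnappy
}
```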


@@ -1,89 +0,0 @@
package sarama
type MessageBlock struct {
Offset int64
Msg *Message
}
// Messages is a convenience helper which returns either the messages wrapped by this
// block's nested message set, or the block itself if it wraps no set.
func (msb *MessageBlock) Messages() []*MessageBlock {
if msb.Msg.Set != nil {
return msb.Msg.Set.Messages
}
return []*MessageBlock{msb}
}
func (msb *MessageBlock) encode(pe packetEncoder) error {
pe.putInt64(msb.Offset)
pe.push(&lengthField{})
err := msb.Msg.encode(pe)
if err != nil {
return err
}
return pe.pop()
}
func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
if msb.Offset, err = pd.getInt64(); err != nil {
return err
}
if err = pd.push(&lengthField{}); err != nil {
return err
}
msb.Msg = new(Message)
if err = msb.Msg.decode(pd); err != nil {
return err
}
if err = pd.pop(); err != nil {
return err
}
return nil
}
type MessageSet struct {
PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
Messages []*MessageBlock
}
func (ms *MessageSet) encode(pe packetEncoder) error {
for i := range ms.Messages {
err := ms.Messages[i].encode(pe)
if err != nil {
return err
}
}
return nil
}
func (ms *MessageSet) decode(pd packetDecoder) (err error) {
ms.Messages = nil
for pd.remaining() > 0 {
msb := new(MessageBlock)
err = msb.decode(pd)
switch err {
case nil:
ms.Messages = append(ms.Messages, msb)
case ErrInsufficientData:
// As an optimization the server is allowed to return a partial message at the
// end of the message set. Clients should handle this case. So we just ignore such things.
ms.PartialTrailingMessage = true
return nil
default:
return err
}
}
return nil
}
func (ms *MessageSet) addMessage(msg *Message) {
block := new(MessageBlock)
block.Msg = msg
ms.Messages = append(ms.Messages, block)
}
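
For illustration, a set can be assembled with the addMessage helper above; the sketch below is hypothetical and uses arbitrary values.

```
// Illustrative only: offsets in each MessageBlock stay zero until the broker assigns them.
func exampleMessageSet() *MessageSet {
	set := new(MessageSet)
	set.addMessage(&Message{Codec: CompressionNone, Value: []byte("hello")})
	set.addMessage(&Message{Codec: CompressionNone, Value: []byte("world")})
	return set // len(set.Messages) == 2
}
```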


@@ -1,113 +0,0 @@
package sarama
import "testing"
var (
emptyMessage = []byte{
167, 236, 104, 3, // CRC
0x00, // magic version byte
0x00, // attribute flags
0xFF, 0xFF, 0xFF, 0xFF, // key
0xFF, 0xFF, 0xFF, 0xFF} // value
emptyGzipMessage = []byte{
97, 79, 149, 90, //CRC
0x00, // magic version byte
0x01, // attribute flags
0xFF, 0xFF, 0xFF, 0xFF, // key
// value
0x00, 0x00, 0x00, 0x17,
0x1f, 0x8b,
0x08,
0, 0, 9, 110, 136, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}
emptyBulkSnappyMessage = []byte{
180, 47, 53, 209, //CRC
0x00, // magic version byte
0x02, // attribute flags
0xFF, 0xFF, 0xFF, 0xFF, // key
0, 0, 0, 42,
130, 83, 78, 65, 80, 80, 89, 0, // SNAPPY magic
0, 0, 0, 1, // min version
0, 0, 0, 1, // default version
0, 0, 0, 22, 52, 0, 0, 25, 1, 16, 14, 227, 138, 104, 118, 25, 15, 13, 1, 8, 1, 0, 0, 62, 26, 0}
emptyBulkGzipMessage = []byte{
139, 160, 63, 141, //CRC
0x00, // magic version byte
0x01, // attribute flags
0xFF, 0xFF, 0xFF, 0xFF, // key
0x00, 0x00, 0x00, 0x27, // len
0x1f, 0x8b, // Gzip Magic
0x08, // deflate compressed
0, 0, 0, 0, 0, 0, 0, 99, 96, 128, 3, 190, 202, 112, 143, 7, 12, 12, 255, 129, 0, 33, 200, 192, 136, 41, 3, 0, 199, 226, 155, 70, 52, 0, 0, 0}
)
func TestMessageEncoding(t *testing.T) {
message := Message{}
testEncodable(t, "empty", &message, emptyMessage)
message.Value = []byte{}
message.Codec = CompressionGZIP
testEncodable(t, "empty gzip", &message, emptyGzipMessage)
}
func TestMessageDecoding(t *testing.T) {
message := Message{}
testDecodable(t, "empty", &message, emptyMessage)
if message.Codec != CompressionNone {
t.Error("Decoding produced compression codec where there was none.")
}
if message.Key != nil {
t.Error("Decoding produced key where there was none.")
}
if message.Value != nil {
t.Error("Decoding produced value where there was none.")
}
if message.Set != nil {
t.Error("Decoding produced set where there was none.")
}
testDecodable(t, "empty gzip", &message, emptyGzipMessage)
if message.Codec != CompressionGZIP {
t.Error("Decoding produced incorrect compression codec (was gzip).")
}
if message.Key != nil {
t.Error("Decoding produced key where there was none.")
}
if message.Value == nil || len(message.Value) != 0 {
t.Error("Decoding produced nil or content-ful value where there was an empty array.")
}
}
func TestMessageDecodingBulkSnappy(t *testing.T) {
message := Message{}
testDecodable(t, "bulk snappy", &message, emptyBulkSnappyMessage)
if message.Codec != CompressionSnappy {
t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionSnappy)
}
if message.Key != nil {
t.Errorf("Decoding produced key %+v, but none was expected.", message.Key)
}
if message.Set == nil {
t.Error("Decoding produced no set, but one was expected.")
} else if len(message.Set.Messages) != 2 {
t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages))
}
}
func TestMessageDecodingBulkGzip(t *testing.T) {
message := Message{}
testDecodable(t, "bulk gzip", &message, emptyBulkGzipMessage)
if message.Codec != CompressionGZIP {
t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionGZIP)
}
if message.Key != nil {
t.Errorf("Decoding produced key %+v, but none was expected.", message.Key)
}
if message.Set == nil {
t.Error("Decoding produced no set, but one was expected.")
} else if len(message.Set.Messages) != 2 {
t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages))
}
}


@@ -1,48 +0,0 @@
package sarama
type MetadataRequest struct {
Topics []string
}
func (mr *MetadataRequest) encode(pe packetEncoder) error {
err := pe.putArrayLength(len(mr.Topics))
if err != nil {
return err
}
for i := range mr.Topics {
err = pe.putString(mr.Topics[i])
if err != nil {
return err
}
}
return nil
}
func (mr *MetadataRequest) decode(pd packetDecoder) error {
topicCount, err := pd.getArrayLength()
if err != nil {
return err
}
if topicCount == 0 {
return nil
}
mr.Topics = make([]string, topicCount)
for i := range mr.Topics {
topic, err := pd.getString()
if err != nil {
return err
}
mr.Topics[i] = topic
}
return nil
}
func (mr *MetadataRequest) key() int16 {
return 3
}
func (mr *MetadataRequest) version() int16 {
return 0
}


@@ -1,29 +0,0 @@
package sarama
import "testing"
var (
metadataRequestNoTopics = []byte{
0x00, 0x00, 0x00, 0x00}
metadataRequestOneTopic = []byte{
0x00, 0x00, 0x00, 0x01,
0x00, 0x06, 't', 'o', 'p', 'i', 'c', '1'}
metadataRequestThreeTopics = []byte{
0x00, 0x00, 0x00, 0x03,
0x00, 0x03, 'f', 'o', 'o',
0x00, 0x03, 'b', 'a', 'r',
0x00, 0x03, 'b', 'a', 'z'}
)
func TestMetadataRequest(t *testing.T) {
request := new(MetadataRequest)
testRequest(t, "no topics", request, metadataRequestNoTopics)
request.Topics = []string{"topic1"}
testRequest(t, "one topic", request, metadataRequestOneTopic)
request.Topics = []string{"foo", "bar", "baz"}
testRequest(t, "three topics", request, metadataRequestThreeTopics)
}


@@ -1,227 +0,0 @@
package sarama
type PartitionMetadata struct {
Err KError
ID int32
Leader int32
Replicas []int32
Isr []int32
}
func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
pm.Err = KError(tmp)
pm.ID, err = pd.getInt32()
if err != nil {
return err
}
pm.Leader, err = pd.getInt32()
if err != nil {
return err
}
pm.Replicas, err = pd.getInt32Array()
if err != nil {
return err
}
pm.Isr, err = pd.getInt32Array()
if err != nil {
return err
}
return nil
}
func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) {
pe.putInt16(int16(pm.Err))
pe.putInt32(pm.ID)
pe.putInt32(pm.Leader)
err = pe.putInt32Array(pm.Replicas)
if err != nil {
return err
}
err = pe.putInt32Array(pm.Isr)
if err != nil {
return err
}
return nil
}
type TopicMetadata struct {
Err KError
Name string
Partitions []*PartitionMetadata
}
func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
tm.Err = KError(tmp)
tm.Name, err = pd.getString()
if err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
tm.Partitions = make([]*PartitionMetadata, n)
for i := 0; i < n; i++ {
tm.Partitions[i] = new(PartitionMetadata)
err = tm.Partitions[i].decode(pd)
if err != nil {
return err
}
}
return nil
}
func (tm *TopicMetadata) encode(pe packetEncoder) (err error) {
pe.putInt16(int16(tm.Err))
err = pe.putString(tm.Name)
if err != nil {
return err
}
err = pe.putArrayLength(len(tm.Partitions))
if err != nil {
return err
}
for _, pm := range tm.Partitions {
err = pm.encode(pe)
if err != nil {
return err
}
}
return nil
}
type MetadataResponse struct {
Brokers []*Broker
Topics []*TopicMetadata
}
func (m *MetadataResponse) decode(pd packetDecoder) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
m.Brokers = make([]*Broker, n)
for i := 0; i < n; i++ {
m.Brokers[i] = new(Broker)
err = m.Brokers[i].decode(pd)
if err != nil {
return err
}
}
n, err = pd.getArrayLength()
if err != nil {
return err
}
m.Topics = make([]*TopicMetadata, n)
for i := 0; i < n; i++ {
m.Topics[i] = new(TopicMetadata)
err = m.Topics[i].decode(pd)
if err != nil {
return err
}
}
return nil
}
func (m *MetadataResponse) encode(pe packetEncoder) error {
err := pe.putArrayLength(len(m.Brokers))
if err != nil {
return err
}
for _, broker := range m.Brokers {
err = broker.encode(pe)
if err != nil {
return err
}
}
err = pe.putArrayLength(len(m.Topics))
if err != nil {
return err
}
for _, tm := range m.Topics {
err = tm.encode(pe)
if err != nil {
return err
}
}
return nil
}
// testing API
func (m *MetadataResponse) AddBroker(addr string, id int32) {
m.Brokers = append(m.Brokers, &Broker{id: id, addr: addr})
}
func (m *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
var tmatch *TopicMetadata
for _, tm := range m.Topics {
if tm.Name == topic {
tmatch = tm
goto foundTopic
}
}
tmatch = new(TopicMetadata)
tmatch.Name = topic
m.Topics = append(m.Topics, tmatch)
foundTopic:
tmatch.Err = err
return tmatch
}
func (m *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) {
tmatch := m.AddTopic(topic, ErrNoError)
var pmatch *PartitionMetadata
for _, pm := range tmatch.Partitions {
if pm.ID == partition {
pmatch = pm
goto foundPartition
}
}
pmatch = new(PartitionMetadata)
pmatch.ID = partition
tmatch.Partitions = append(tmatch.Partitions, pmatch)
foundPartition:
pmatch.Leader = brokerID
pmatch.Replicas = replicas
pmatch.Isr = isr
pmatch.Err = err
}
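
The testing helpers above are typically chained as in the hypothetical sketch below when canning a response for a test; the broker ID and topic name are arbitrary.

```
// Illustrative only.
func exampleCannedMetadata() *MetadataResponse {
	meta := new(MetadataResponse)
	meta.AddBroker("localhost:9092", 1)
	meta.AddTopicPartition("my_topic", 0, 1, nil, nil, ErrNoError)
	return meta // advertises broker 1 as leader of my_topic/0, with no replicas/ISR listed
}
```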


@@ -1,139 +0,0 @@
package sarama
import "testing"
var (
emptyMetadataResponse = []byte{
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00}
brokersNoTopicsMetadataResponse = []byte{
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0xab, 0xff,
0x00, 0x09, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't',
0x00, 0x00, 0x00, 0x33,
0x00, 0x01, 0x02, 0x03,
0x00, 0x0a, 'g', 'o', 'o', 'g', 'l', 'e', '.', 'c', 'o', 'm',
0x00, 0x00, 0x01, 0x11,
0x00, 0x00, 0x00, 0x00}
topicsNoBrokersMetadataResponse = []byte{
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00,
0x00, 0x03, 'f', 'o', 'o',
0x00, 0x00, 0x00, 0x01,
0x00, 0x04,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x07,
0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
0x00, 0x03, 'b', 'a', 'r',
0x00, 0x00, 0x00, 0x00}
)
func TestEmptyMetadataResponse(t *testing.T) {
response := MetadataResponse{}
testDecodable(t, "empty", &response, emptyMetadataResponse)
if len(response.Brokers) != 0 {
t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!")
}
if len(response.Topics) != 0 {
t.Error("Decoding produced", len(response.Topics), "topics where there were none!")
}
}
func TestMetadataResponseWithBrokers(t *testing.T) {
response := MetadataResponse{}
testDecodable(t, "brokers, no topics", &response, brokersNoTopicsMetadataResponse)
if len(response.Brokers) != 2 {
t.Fatal("Decoding produced", len(response.Brokers), "brokers where there were two!")
}
if response.Brokers[0].id != 0xabff {
t.Error("Decoding produced invalid broker 0 id.")
}
if response.Brokers[0].addr != "localhost:51" {
t.Error("Decoding produced invalid broker 0 address.")
}
if response.Brokers[1].id != 0x010203 {
t.Error("Decoding produced invalid broker 1 id.")
}
if response.Brokers[1].addr != "google.com:273" {
t.Error("Decoding produced invalid broker 1 address.")
}
if len(response.Topics) != 0 {
t.Error("Decoding produced", len(response.Topics), "topics where there were none!")
}
}
func TestMetadataResponseWithTopics(t *testing.T) {
response := MetadataResponse{}
testDecodable(t, "topics, no brokers", &response, topicsNoBrokersMetadataResponse)
if len(response.Brokers) != 0 {
t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!")
}
if len(response.Topics) != 2 {
t.Fatal("Decoding produced", len(response.Topics), "topics where there were two!")
}
if response.Topics[0].Err != ErrNoError {
t.Error("Decoding produced invalid topic 0 error.")
}
if response.Topics[0].Name != "foo" {
t.Error("Decoding produced invalid topic 0 name.")
}
if len(response.Topics[0].Partitions) != 1 {
t.Fatal("Decoding produced invalid partition count for topic 0.")
}
if response.Topics[0].Partitions[0].Err != ErrInvalidMessageSize {
t.Error("Decoding produced invalid topic 0 partition 0 error.")
}
if response.Topics[0].Partitions[0].ID != 0x01 {
t.Error("Decoding produced invalid topic 0 partition 0 id.")
}
if response.Topics[0].Partitions[0].Leader != 0x07 {
t.Error("Decoding produced invalid topic 0 partition 0 leader.")
}
if len(response.Topics[0].Partitions[0].Replicas) != 3 {
t.Fatal("Decoding produced invalid topic 0 partition 0 replicas.")
}
for i := 0; i < 3; i++ {
if response.Topics[0].Partitions[0].Replicas[i] != int32(i+1) {
t.Error("Decoding produced invalid topic 0 partition 0 replica", i)
}
}
if len(response.Topics[0].Partitions[0].Isr) != 0 {
t.Error("Decoding produced invalid topic 0 partition 0 isr length.")
}
if response.Topics[1].Err != ErrNoError {
t.Error("Decoding produced invalid topic 1 error.")
}
if response.Topics[1].Name != "bar" {
t.Error("Decoding produced invalid topic 0 name.")
}
if len(response.Topics[1].Partitions) != 0 {
t.Error("Decoding produced invalid partition count for topic 1.")
}
}


@@ -1,273 +0,0 @@
package sarama
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"net"
"reflect"
"strconv"
"sync"
"testing"
"time"
"github.com/davecgh/go-spew/spew"
)
const (
expectationTimeout = 500 * time.Millisecond
)
type requestHandlerFunc func(req *request) (res encoder)
// mockBroker is a mock Kafka broker. It consists of a TCP server on a
// kernel-selected localhost port that can accept many connections. It reads
// Kafka requests from that connection and passes them to the user specified
// handler function (see SetHandler) that generates respective responses. If
// the handler has not been explicitly specified then the broker returns
// responses set by the Returns function in the exact order they were provided
// (if a response has a length of 0, nothing is sent and the client request will
// time out in this case).
//
// When running tests with one of these, it is strongly recommended to specify
// a timeout to `go test` so that if the broker hangs waiting for a response,
// the test panics.
//
// It is not necessary to prefix message length or correlation ID to your
// response bytes, the server does that automatically as a convenience.
type mockBroker struct {
brokerID int32
port int32
closing chan none
stopper chan none
expectations chan encoder
listener net.Listener
t *testing.T
latency time.Duration
handler requestHandlerFunc
history []RequestResponse
lock sync.Mutex
}
type RequestResponse struct {
Request requestBody
Response encoder
}
func (b *mockBroker) SetLatency(latency time.Duration) {
b.latency = latency
}
// SetHandler sets the specified function as the request handler. Whenever
// a mock broker reads a request from the wire it passes the request to the
// function and sends back whatever the handler function returns.
func (b *mockBroker) SetHandler(handler requestHandlerFunc) {
b.lock.Lock()
b.handler = handler
b.lock.Unlock()
}
func (b *mockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
b.SetHandler(func(req *request) (res encoder) {
reqTypeName := reflect.TypeOf(req.body).Elem().Name()
mockResponse := handlerMap[reqTypeName]
if mockResponse == nil {
return nil
}
return mockResponse.For(req.body)
})
}
func (b *mockBroker) BrokerID() int32 {
return b.brokerID
}
func (b *mockBroker) History() []RequestResponse {
b.lock.Lock()
history := make([]RequestResponse, len(b.history))
copy(history, b.history)
b.lock.Unlock()
return history
}
func (b *mockBroker) Port() int32 {
return b.port
}
func (b *mockBroker) Addr() string {
return b.listener.Addr().String()
}
func (b *mockBroker) Close() {
close(b.expectations)
if len(b.expectations) > 0 {
buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
for e := range b.expectations {
_, _ = buf.WriteString(spew.Sdump(e))
}
b.t.Error(buf.String())
}
close(b.closing)
<-b.stopper
}
func (b *mockBroker) serverLoop() {
defer close(b.stopper)
var err error
var conn net.Conn
go func() {
<-b.closing
safeClose(b.t, b.listener)
}()
wg := &sync.WaitGroup{}
i := 0
for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
wg.Add(1)
go b.handleRequests(conn, i, wg)
i++
}
wg.Wait()
Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
}
func (b *mockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
defer wg.Done()
defer func() {
_ = conn.Close()
}()
Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
var err error
abort := make(chan none)
defer close(abort)
go func() {
select {
case <-b.closing:
_ = conn.Close()
case <-abort:
}
}()
resHeader := make([]byte, 8)
for {
req, err := decodeRequest(conn)
if err != nil {
Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
b.serverError(err)
break
}
if b.latency > 0 {
time.Sleep(b.latency)
}
b.lock.Lock()
res := b.handler(req)
b.history = append(b.history, RequestResponse{req.body, res})
b.lock.Unlock()
if res == nil {
Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
continue
}
Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
encodedRes, err := encode(res)
if err != nil {
b.serverError(err)
break
}
if len(encodedRes) == 0 {
continue
}
binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
if _, err = conn.Write(resHeader); err != nil {
b.serverError(err)
break
}
if _, err = conn.Write(encodedRes); err != nil {
b.serverError(err)
break
}
}
Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
}
func (b *mockBroker) defaultRequestHandler(req *request) (res encoder) {
select {
case res, ok := <-b.expectations:
if !ok {
return nil
}
return res
case <-time.After(expectationTimeout):
return nil
}
}
func (b *mockBroker) serverError(err error) {
isConnectionClosedError := false
if _, ok := err.(*net.OpError); ok {
isConnectionClosedError = true
} else if err == io.EOF {
isConnectionClosedError = true
} else if err.Error() == "use of closed network connection" {
isConnectionClosedError = true
}
if isConnectionClosedError {
return
}
b.t.Error(err)
}
// newMockBroker launches a fake Kafka broker. It takes a *testing.T as provided by the
// test framework and a broker ID; responses to serve are queued with Returns or produced
// by a handler set via SetHandler. If an error occurs it is simply logged to the *testing.T
// and the broker exits.
func newMockBroker(t *testing.T, brokerID int32) *mockBroker {
return newMockBrokerAddr(t, brokerID, "localhost:0")
}
// newMockBrokerAddr behaves like newMockBroker but listens on the address you give
// it rather than just some ephemeral port.
func newMockBrokerAddr(t *testing.T, brokerID int32, addr string) *mockBroker {
var err error
broker := &mockBroker{
closing: make(chan none),
stopper: make(chan none),
t: t,
brokerID: brokerID,
expectations: make(chan encoder, 512),
}
broker.handler = broker.defaultRequestHandler
broker.listener, err = net.Listen("tcp", addr)
if err != nil {
t.Fatal(err)
}
Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
if err != nil {
t.Fatal(err)
}
tmp, err := strconv.ParseInt(portStr, 10, 32)
if err != nil {
t.Fatal(err)
}
broker.port = int32(tmp)
go broker.serverLoop()
return broker
}
func (b *mockBroker) Returns(e encoder) {
b.expectations <- e
}
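
Putting the pieces together, a typical mockBroker lifecycle looks roughly like the hypothetical sketch below; it assumes the NewClient, NewConfig and safeClose identifiers seen elsewhere in this diff, and an arbitrary broker ID.

```
// Illustrative only.
func exampleMockBrokerLifecycle(t *testing.T) {
	broker := newMockBroker(t, 1)
	defer broker.Close() // reports an error if queued expectations were never consumed

	meta := new(MetadataResponse)
	meta.AddBroker(broker.Addr(), broker.BrokerID())
	broker.Returns(meta) // served by the default handler for the next request

	client, err := NewClient([]string{broker.Addr()}, NewConfig())
	if err != nil {
		t.Fatal(err)
	}
	safeClose(t, client)
}
```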


@@ -1,411 +0,0 @@
package sarama
import (
"testing"
)
// MockResponse is a response builder interface; it defines one method that
// allows generating a response based on a request body.
type MockResponse interface {
For(reqBody decoder) (res encoder)
}
type mockWrapper struct {
res encoder
}
func (mw *mockWrapper) For(reqBody decoder) (res encoder) {
return mw.res
}
func newMockWrapper(res encoder) *mockWrapper {
return &mockWrapper{res: res}
}
// mockMetadataResponse is a `MetadataResponse` builder.
type mockMetadataResponse struct {
leaders map[string]map[int32]int32
brokers map[string]int32
t *testing.T
}
func newMockMetadataResponse(t *testing.T) *mockMetadataResponse {
return &mockMetadataResponse{
leaders: make(map[string]map[int32]int32),
brokers: make(map[string]int32),
t: t,
}
}
func (mmr *mockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *mockMetadataResponse {
partitions := mmr.leaders[topic]
if partitions == nil {
partitions = make(map[int32]int32)
mmr.leaders[topic] = partitions
}
partitions[partition] = brokerID
return mmr
}
func (mmr *mockMetadataResponse) SetBroker(addr string, brokerID int32) *mockMetadataResponse {
mmr.brokers[addr] = brokerID
return mmr
}
func (mor *mockMetadataResponse) For(reqBody decoder) encoder {
metadataRequest := reqBody.(*MetadataRequest)
metadataResponse := &MetadataResponse{}
for addr, brokerID := range mor.brokers {
metadataResponse.AddBroker(addr, brokerID)
}
if len(metadataRequest.Topics) == 0 {
for topic, partitions := range mor.leaders {
for partition, brokerID := range partitions {
metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
}
}
return metadataResponse
}
for _, topic := range metadataRequest.Topics {
for partition, brokerID := range mor.leaders[topic] {
metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
}
}
return metadataResponse
}
// mockOffsetResponse is an `OffsetResponse` builder.
type mockOffsetResponse struct {
offsets map[string]map[int32]map[int64]int64
t *testing.T
}
func newMockOffsetResponse(t *testing.T) *mockOffsetResponse {
return &mockOffsetResponse{
offsets: make(map[string]map[int32]map[int64]int64),
t: t,
}
}
func (mor *mockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *mockOffsetResponse {
partitions := mor.offsets[topic]
if partitions == nil {
partitions = make(map[int32]map[int64]int64)
mor.offsets[topic] = partitions
}
times := partitions[partition]
if times == nil {
times = make(map[int64]int64)
partitions[partition] = times
}
times[time] = offset
return mor
}
func (mor *mockOffsetResponse) For(reqBody decoder) encoder {
offsetRequest := reqBody.(*OffsetRequest)
offsetResponse := &OffsetResponse{}
for topic, partitions := range offsetRequest.blocks {
for partition, block := range partitions {
offset := mor.getOffset(topic, partition, block.time)
offsetResponse.AddTopicPartition(topic, partition, offset)
}
}
return offsetResponse
}
func (mor *mockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
partitions := mor.offsets[topic]
if partitions == nil {
mor.t.Errorf("missing topic: %s", topic)
}
times := partitions[partition]
if times == nil {
mor.t.Errorf("missing partition: %d", partition)
}
offset, ok := times[time]
if !ok {
mor.t.Errorf("missing time: %d", time)
}
return offset
}
// mockFetchResponse is a `FetchResponse` builder.
type mockFetchResponse struct {
messages map[string]map[int32]map[int64]Encoder
highWaterMarks map[string]map[int32]int64
t *testing.T
batchSize int
}
func newMockFetchResponse(t *testing.T, batchSize int) *mockFetchResponse {
return &mockFetchResponse{
messages: make(map[string]map[int32]map[int64]Encoder),
highWaterMarks: make(map[string]map[int32]int64),
t: t,
batchSize: batchSize,
}
}
func (mfr *mockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *mockFetchResponse {
partitions := mfr.messages[topic]
if partitions == nil {
partitions = make(map[int32]map[int64]Encoder)
mfr.messages[topic] = partitions
}
messages := partitions[partition]
if messages == nil {
messages = make(map[int64]Encoder)
partitions[partition] = messages
}
messages[offset] = msg
return mfr
}
func (mfr *mockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *mockFetchResponse {
partitions := mfr.highWaterMarks[topic]
if partitions == nil {
partitions = make(map[int32]int64)
mfr.highWaterMarks[topic] = partitions
}
partitions[partition] = offset
return mfr
}
func (mfr *mockFetchResponse) For(reqBody decoder) encoder {
fetchRequest := reqBody.(*FetchRequest)
res := &FetchResponse{}
for topic, partitions := range fetchRequest.blocks {
for partition, block := range partitions {
initialOffset := block.fetchOffset
offset := initialOffset
maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
for i := 0; i < mfr.batchSize && offset < maxOffset; {
msg := mfr.getMessage(topic, partition, offset)
if msg != nil {
res.AddMessage(topic, partition, nil, msg, offset)
i++
}
offset++
}
fb := res.GetBlock(topic, partition)
if fb == nil {
res.AddError(topic, partition, ErrNoError)
fb = res.GetBlock(topic, partition)
}
fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
}
}
return res
}
func (mfr *mockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder {
partitions := mfr.messages[topic]
if partitions == nil {
return nil
}
messages := partitions[partition]
if messages == nil {
return nil
}
return messages[offset]
}
func (mfr *mockFetchResponse) getMessageCount(topic string, partition int32) int {
partitions := mfr.messages[topic]
if partitions == nil {
return 0
}
messages := partitions[partition]
if messages == nil {
return 0
}
return len(messages)
}
func (mfr *mockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
partitions := mfr.highWaterMarks[topic]
if partitions == nil {
return 0
}
return partitions[partition]
}
// mockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
type mockConsumerMetadataResponse struct {
coordinators map[string]interface{}
t *testing.T
}
func newMockConsumerMetadataResponse(t *testing.T) *mockConsumerMetadataResponse {
return &mockConsumerMetadataResponse{
coordinators: make(map[string]interface{}),
t: t,
}
}
func (mr *mockConsumerMetadataResponse) SetCoordinator(group string, broker *mockBroker) *mockConsumerMetadataResponse {
mr.coordinators[group] = broker
return mr
}
func (mr *mockConsumerMetadataResponse) SetError(group string, kerror KError) *mockConsumerMetadataResponse {
mr.coordinators[group] = kerror
return mr
}
func (mr *mockConsumerMetadataResponse) For(reqBody decoder) encoder {
req := reqBody.(*ConsumerMetadataRequest)
group := req.ConsumerGroup
res := &ConsumerMetadataResponse{}
v := mr.coordinators[group]
switch v := v.(type) {
case *mockBroker:
res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
case KError:
res.Err = v
}
return res
}
// mockOffsetCommitResponse is an `OffsetCommitResponse` builder.
type mockOffsetCommitResponse struct {
errors map[string]map[string]map[int32]KError
t *testing.T
}
func newMockOffsetCommitResponse(t *testing.T) *mockOffsetCommitResponse {
return &mockOffsetCommitResponse{t: t}
}
func (mr *mockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *mockOffsetCommitResponse {
if mr.errors == nil {
mr.errors = make(map[string]map[string]map[int32]KError)
}
topics := mr.errors[group]
if topics == nil {
topics = make(map[string]map[int32]KError)
mr.errors[group] = topics
}
partitions := topics[topic]
if partitions == nil {
partitions = make(map[int32]KError)
topics[topic] = partitions
}
partitions[partition] = kerror
return mr
}
func (mr *mockOffsetCommitResponse) For(reqBody decoder) encoder {
req := reqBody.(*OffsetCommitRequest)
group := req.ConsumerGroup
res := &OffsetCommitResponse{}
for topic, partitions := range req.blocks {
for partition := range partitions {
res.AddError(topic, partition, mr.getError(group, topic, partition))
}
}
return res
}
func (mr *mockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
topics := mr.errors[group]
if topics == nil {
return ErrNoError
}
partitions := topics[topic]
if partitions == nil {
return ErrNoError
}
kerror, ok := partitions[partition]
if !ok {
return ErrNoError
}
return kerror
}
// mockProduceResponse is a `ProduceResponse` builder.
type mockProduceResponse struct {
errors map[string]map[int32]KError
t *testing.T
}
func newMockProduceResponse(t *testing.T) *mockProduceResponse {
return &mockProduceResponse{t: t}
}
func (mr *mockProduceResponse) SetError(topic string, partition int32, kerror KError) *mockProduceResponse {
if mr.errors == nil {
mr.errors = make(map[string]map[int32]KError)
}
partitions := mr.errors[topic]
if partitions == nil {
partitions = make(map[int32]KError)
mr.errors[topic] = partitions
}
partitions[partition] = kerror
return mr
}
func (mr *mockProduceResponse) For(reqBody decoder) encoder {
req := reqBody.(*ProduceRequest)
res := &ProduceResponse{}
for topic, partitions := range req.msgSets {
for partition := range partitions {
res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
}
}
return res
}
func (mr *mockProduceResponse) getError(topic string, partition int32) KError {
partitions := mr.errors[topic]
if partitions == nil {
return ErrNoError
}
kerror, ok := partitions[partition]
if !ok {
return ErrNoError
}
return kerror
}
// mockOffsetFetchResponse is an `OffsetFetchResponse` builder.
type mockOffsetFetchResponse struct {
offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
t *testing.T
}
func newMockOffsetFetchResponse(t *testing.T) *mockOffsetFetchResponse {
return &mockOffsetFetchResponse{t: t}
}
func (mr *mockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *mockOffsetFetchResponse {
if mr.offsets == nil {
mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
}
topics := mr.offsets[group]
if topics == nil {
topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
mr.offsets[group] = topics
}
partitions := topics[topic]
if partitions == nil {
partitions = make(map[int32]*OffsetFetchResponseBlock)
topics[topic] = partitions
}
partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror}
return mr
}
func (mr *mockOffsetFetchResponse) For(reqBody decoder) encoder {
req := reqBody.(*OffsetFetchRequest)
group := req.ConsumerGroup
res := &OffsetFetchResponse{}
for topic, partitions := range mr.offsets[group] {
for partition, block := range partitions {
res.AddBlock(topic, partition, block)
}
}
return res
}
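
These builders are wired onto a mockBroker by request type name through SetHandlerByMap; the hypothetical sketch below uses arbitrary topic names, offsets and batch size, and only identifiers that appear in this diff.

```
// Illustrative only: keys are matched by reflection against the request body's type name.
func exampleHandlerMap(t *testing.T) *mockBroker {
	broker := newMockBroker(t, 1)
	broker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": newMockMetadataResponse(t).
			SetBroker(broker.Addr(), broker.BrokerID()).
			SetLeader("my_topic", 0, broker.BrokerID()),
		"OffsetRequest": newMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 0).
			SetOffset("my_topic", 0, OffsetNewest, 10),
		"FetchRequest": newMockFetchResponse(t, 1).
			SetMessage("my_topic", 0, 0, StringEncoder("hello")),
	})
	return broker
}
```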


@@ -1,13 +0,0 @@
# sarama/mocks
The `mocks` subpackage provides mock implementations of the major sarama interfaces.
You can use them to test your sarama applications using dependency injection.
The following mock objects are available:
- [Consumer](https://godoc.org/github.com/Shopify/sarama/mocks#Consumer), which will create [PartitionConsumer](https://godoc.org/github.com/Shopify/sarama/mocks#PartitionConsumer) mocks.
- [AsyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#AsyncProducer)
- [SyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#SyncProducer)
The mocks allow you to set expectations on them. When you close the mocks, the expectations will be verified,
and the results will be reported to the `*testing.T` object you provided when creating the mock.
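
A minimal, hypothetical test using the AsyncProducer mock from this package might look like this; the package name, test name, topic and payload are illustrative only.

```
package mypackage

import (
	"testing"

	"github.com/Shopify/sarama"
	"github.com/Shopify/sarama/mocks"
)

func TestMyProducingCode(t *testing.T) {
	mp := mocks.NewAsyncProducer(t, nil)
	mp.ExpectInputAndSucceed()

	// Inject mp wherever the code under test expects a sarama.AsyncProducer.
	mp.Input() <- &sarama.ProducerMessage{Topic: "my_topic", Value: sarama.StringEncoder("hello")}

	if err := mp.Close(); err != nil { // Close verifies that all expectations were met
		t.Error(err)
	}
}
```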


@@ -1,142 +0,0 @@
package mocks
import (
"sync"
"github.com/Shopify/sarama"
)
// AsyncProducer implements sarama's AsyncProducer interface for testing purposes.
// Before you can send messages to its Input channel, you have to set expectations
// so it knows how to handle the input. This way you can easily test success and
// failure scenarios.
type AsyncProducer struct {
l sync.Mutex
t ErrorReporter
expectations []*producerExpectation
closed chan struct{}
input chan *sarama.ProducerMessage
successes chan *sarama.ProducerMessage
errors chan *sarama.ProducerError
lastOffset int64
}
// NewAsyncProducer instantiates a new Producer mock. The t argument should
// be the *testing.T instance of your test method. An error will be written to it if
// an expectation is violated. The config argument is used to determine whether it
// should ack successes on the Successes channel.
func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer {
if config == nil {
config = sarama.NewConfig()
}
mp := &AsyncProducer{
t: t,
closed: make(chan struct{}, 0),
expectations: make([]*producerExpectation, 0),
input: make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
successes: make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
errors: make(chan *sarama.ProducerError, config.ChannelBufferSize),
}
go func() {
defer func() {
close(mp.successes)
close(mp.errors)
}()
for msg := range mp.input {
mp.l.Lock()
if mp.expectations == nil || len(mp.expectations) == 0 {
mp.expectations = nil
mp.t.Errorf("No more expectation set on this mock producer to handle the input message.")
} else {
expectation := mp.expectations[0]
mp.expectations = mp.expectations[1:]
if expectation.Result == errProduceSuccess {
mp.lastOffset++
if config.Producer.Return.Successes {
msg.Offset = mp.lastOffset
mp.successes <- msg
}
} else {
if config.Producer.Return.Errors {
mp.errors <- &sarama.ProducerError{Err: expectation.Result, Msg: msg}
}
}
}
mp.l.Unlock()
}
mp.l.Lock()
if len(mp.expectations) > 0 {
mp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(mp.expectations))
}
mp.l.Unlock()
close(mp.closed)
}()
return mp
}
////////////////////////////////////////////////
// Implement Producer interface
////////////////////////////////////////////////
// AsyncClose corresponds with the AsyncClose method of sarama's Producer implementation.
// By closing a mock producer, you also tell it that no more input will be provided, so it will
// write an error to the test state if there's any remaining expectations.
func (mp *AsyncProducer) AsyncClose() {
close(mp.input)
}
// Close corresponds with the Close method of sarama's Producer implementation.
// By closing a mock producer, you also tell it that no more input will be provided, so it will
// write an error to the test state if there's any remaining expectations.
func (mp *AsyncProducer) Close() error {
mp.AsyncClose()
<-mp.closed
return nil
}
// Input corresponds with the Input method of sarama's Producer implementation.
// You have to set expectations on the mock producer before writing messages to the Input
// channel, so it knows how to handle them. If there are no remaining expectations and
// a message is written to the Input channel, the mock producer will write an error to the test
// state object.
func (mp *AsyncProducer) Input() chan<- *sarama.ProducerMessage {
return mp.input
}
// Successes corresponds with the Successes method of sarama's Producer implementation.
func (mp *AsyncProducer) Successes() <-chan *sarama.ProducerMessage {
return mp.successes
}
// Errors corresponds with the Errors method of sarama's Producer implementation.
func (mp *AsyncProducer) Errors() <-chan *sarama.ProducerError {
return mp.errors
}
////////////////////////////////////////////////
// Setting expectations
////////////////////////////////////////////////
// ExpectInputAndSucceed sets an expectation on the mock producer that a message will be provided
// on the input channel. The mock producer will handle the message as if it is produced successfully,
// i.e. it will make it available on the Successes channel if the Producer.Return.Successes setting
// is set to true.
func (mp *AsyncProducer) ExpectInputAndSucceed() {
mp.l.Lock()
defer mp.l.Unlock()
mp.expectations = append(mp.expectations, &producerExpectation{Result: errProduceSuccess})
}
// ExpectInputAndFail sets an expectation on the mock producer that a message will be provided
// on the input channel. The mock producer will handle the message as if it failed to produce
// successfully. This means it will make a ProducerError available on the Errors channel.
func (mp *AsyncProducer) ExpectInputAndFail(err error) {
mp.l.Lock()
defer mp.l.Unlock()
mp.expectations = append(mp.expectations, &producerExpectation{Result: err})
}


@@ -1,94 +0,0 @@
package mocks
import (
"fmt"
"testing"
"github.com/Shopify/sarama"
)
type testReporterMock struct {
errors []string
}
func newTestReporterMock() *testReporterMock {
return &testReporterMock{errors: make([]string, 0)}
}
func (trm *testReporterMock) Errorf(format string, args ...interface{}) {
trm.errors = append(trm.errors, fmt.Sprintf(format, args...))
}
func TestMockAsyncProducerImplementsAsyncProducerInterface(t *testing.T) {
var mp interface{} = &AsyncProducer{}
if _, ok := mp.(sarama.AsyncProducer); !ok {
t.Error("The mock producer should implement the sarama.Producer interface.")
}
}
func TestProducerReturnsExpectationsToChannels(t *testing.T) {
config := sarama.NewConfig()
config.Producer.Return.Successes = true
mp := NewAsyncProducer(t, config)
mp.ExpectInputAndSucceed()
mp.ExpectInputAndSucceed()
mp.ExpectInputAndFail(sarama.ErrOutOfBrokers)
mp.Input() <- &sarama.ProducerMessage{Topic: "test 1"}
mp.Input() <- &sarama.ProducerMessage{Topic: "test 2"}
mp.Input() <- &sarama.ProducerMessage{Topic: "test 3"}
msg1 := <-mp.Successes()
msg2 := <-mp.Successes()
err1 := <-mp.Errors()
if msg1.Topic != "test 1" {
t.Error("Expected message 1 to be returned first")
}
if msg2.Topic != "test 2" {
t.Error("Expected message 2 to be returned second")
}
if err1.Msg.Topic != "test 3" || err1.Err != sarama.ErrOutOfBrokers {
t.Error("Expected message 3 to be returned as error")
}
if err := mp.Close(); err != nil {
t.Error(err)
}
}
func TestProducerWithTooFewExpectations(t *testing.T) {
trm := newTestReporterMock()
mp := NewAsyncProducer(trm, nil)
mp.ExpectInputAndSucceed()
mp.Input() <- &sarama.ProducerMessage{Topic: "test"}
mp.Input() <- &sarama.ProducerMessage{Topic: "test"}
if err := mp.Close(); err != nil {
t.Error(err)
}
if len(trm.errors) != 1 {
t.Error("Expected to report an error")
}
}
func TestProducerWithTooManyExpectations(t *testing.T) {
trm := newTestReporterMock()
mp := NewAsyncProducer(trm, nil)
mp.ExpectInputAndSucceed()
mp.ExpectInputAndFail(sarama.ErrOutOfBrokers)
mp.Input() <- &sarama.ProducerMessage{Topic: "test"}
if err := mp.Close(); err != nil {
t.Error(err)
}
if len(trm.errors) != 1 {
t.Error("Expected to report an error")
}
}


@@ -1,316 +0,0 @@
package mocks
import (
"sync"
"sync/atomic"
"github.com/Shopify/sarama"
)
// Consumer implements sarama's Consumer interface for testing purposes.
// Before you can start consuming from this consumer, you have to register
// topic/partitions using ExpectConsumePartition, and set expectations on them.
type Consumer struct {
l sync.Mutex
t ErrorReporter
config *sarama.Config
partitionConsumers map[string]map[int32]*PartitionConsumer
metadata map[string][]int32
}
// NewConsumer returns a new mock Consumer instance. The t argument should
// be the *testing.T instance of your test method. An error will be written to it if
// an expectation is violated. The config argument is currently unused and can be set to nil.
func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer {
if config == nil {
config = sarama.NewConfig()
}
c := &Consumer{
t: t,
config: config,
partitionConsumers: make(map[string]map[int32]*PartitionConsumer),
}
return c
}
///////////////////////////////////////////////////
// Consumer interface implementation
///////////////////////////////////////////////////
// ConsumePartition implements the ConsumePartition method from the sarama.Consumer interface.
// Before you can start consuming a partition, you have to set expectations on it using
// ExpectConsumePartition. You can only consume a partition once per consumer.
func (c *Consumer) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) {
c.l.Lock()
defer c.l.Unlock()
if c.partitionConsumers[topic] == nil || c.partitionConsumers[topic][partition] == nil {
c.t.Errorf("No expectations set for %s/%d", topic, partition)
return nil, errOutOfExpectations
}
pc := c.partitionConsumers[topic][partition]
if pc.consumed {
return nil, sarama.ConfigurationError("The topic/partition is already being consumed")
}
if pc.offset != AnyOffset && pc.offset != offset {
c.t.Errorf("Unexpected offset when calling ConsumePartition for %s/%d. Expected %d, got %d.", topic, partition, pc.offset, offset)
}
pc.consumed = true
go pc.handleExpectations()
return pc, nil
}
// Topics returns a list of topics, as registered with SetTopicMetadata
func (c *Consumer) Topics() ([]string, error) {
c.l.Lock()
defer c.l.Unlock()
if c.metadata == nil {
c.t.Errorf("Unexpected call to Topics. Initialize the mock's topic metadata with SetMetadata.")
return nil, sarama.ErrOutOfBrokers
}
var result []string
for topic := range c.metadata {
result = append(result, topic)
}
return result, nil
}
// Partitions returns the list of partitions for the given topic, as registered with SetTopicMetadata
func (c *Consumer) Partitions(topic string) ([]int32, error) {
c.l.Lock()
defer c.l.Unlock()
if c.metadata == nil {
c.t.Errorf("Unexpected call to Partitions. Initialize the mock's topic metadata with SetMetadata.")
return nil, sarama.ErrOutOfBrokers
}
if c.metadata[topic] == nil {
return nil, sarama.ErrUnknownTopicOrPartition
}
return c.metadata[topic], nil
}
// Close implements the Close method from the sarama.Consumer interface. It will close
// all registered PartitionConsumer instances.
func (c *Consumer) Close() error {
c.l.Lock()
defer c.l.Unlock()
for _, partitions := range c.partitionConsumers {
for _, partitionConsumer := range partitions {
_ = partitionConsumer.Close()
}
}
return nil
}
///////////////////////////////////////////////////
// Expectation API
///////////////////////////////////////////////////
// SetTopicMetadata sets the cluster's topic/partition metadata,
// which will be returned by Topics() and Partitions().
func (c *Consumer) SetTopicMetadata(metadata map[string][]int32) {
c.l.Lock()
defer c.l.Unlock()
c.metadata = metadata
}
// ExpectConsumePartition will register a topic/partition, so you can set expectations on it.
// The registered PartitionConsumer will be returned, so you can set expectations
// on it using method chaining. Once a topic/partition is registered, you are
// expected to start consuming it using ConsumePartition. If that doesn't happen,
// an error will be written to the error reporter once the mock consumer is closed. It will
// also expect the offset passed to ConsumePartition to match the offset registered here,
// unless AnyOffset was used.
func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset int64) *PartitionConsumer {
c.l.Lock()
defer c.l.Unlock()
if c.partitionConsumers[topic] == nil {
c.partitionConsumers[topic] = make(map[int32]*PartitionConsumer)
}
if c.partitionConsumers[topic][partition] == nil {
c.partitionConsumers[topic][partition] = &PartitionConsumer{
t: c.t,
topic: topic,
partition: partition,
offset: offset,
expectations: make(chan *consumerExpectation, 1000),
messages: make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize),
errors: make(chan *sarama.ConsumerError, c.config.ChannelBufferSize),
}
}
return c.partitionConsumers[topic][partition]
}
///////////////////////////////////////////////////
// PartitionConsumer mock type
///////////////////////////////////////////////////
// PartitionConsumer implements sarama's PartitionConsumer interface for testing purposes.
// It is returned by the mock Consumer's ConsumePartition method, but only if it is
// registered first using the Consumer's ExpectConsumePartition method. Before consuming the
// Errors and Messages channels, you should specify what values will be provided on these
// channels using YieldMessage and YieldError.
type PartitionConsumer struct {
l sync.Mutex
t ErrorReporter
topic string
partition int32
offset int64
expectations chan *consumerExpectation
messages chan *sarama.ConsumerMessage
errors chan *sarama.ConsumerError
singleClose sync.Once
consumed bool
errorsShouldBeDrained bool
messagesShouldBeDrained bool
highWaterMarkOffset int64
}
func (pc *PartitionConsumer) handleExpectations() {
pc.l.Lock()
defer pc.l.Unlock()
for ex := range pc.expectations {
if ex.Err != nil {
pc.errors <- &sarama.ConsumerError{
Topic: pc.topic,
Partition: pc.partition,
Err: ex.Err,
}
} else {
atomic.AddInt64(&pc.highWaterMarkOffset, 1)
ex.Msg.Topic = pc.topic
ex.Msg.Partition = pc.partition
ex.Msg.Offset = atomic.LoadInt64(&pc.highWaterMarkOffset)
pc.messages <- ex.Msg
}
}
close(pc.messages)
close(pc.errors)
}
///////////////////////////////////////////////////
// PartitionConsumer interface implementation
///////////////////////////////////////////////////
// AsyncClose implements the AsyncClose method from the sarama.PartitionConsumer interface.
func (pc *PartitionConsumer) AsyncClose() {
pc.singleClose.Do(func() {
close(pc.expectations)
})
}
// Close implements the Close method from the sarama.PartitionConsumer interface. It will
// verify whether the partition consumer was actually started.
func (pc *PartitionConsumer) Close() error {
if !pc.consumed {
pc.t.Errorf("Expectations set on %s/%d, but no partition consumer was started.", pc.topic, pc.partition)
return errPartitionConsumerNotStarted
}
if pc.errorsShouldBeDrained && len(pc.errors) > 0 {
pc.t.Errorf("Expected the errors channel for %s/%d to be drained on close, but found %d errors.", pc.topic, pc.partition, len(pc.errors))
}
if pc.messagesShouldBeDrained && len(pc.messages) > 0 {
pc.t.Errorf("Expected the messages channel for %s/%d to be drained on close, but found %d messages.", pc.topic, pc.partition, len(pc.messages))
}
pc.AsyncClose()
var (
closeErr error
wg sync.WaitGroup
)
wg.Add(1)
go func() {
defer wg.Done()
var errs = make(sarama.ConsumerErrors, 0)
for err := range pc.errors {
errs = append(errs, err)
}
if len(errs) > 0 {
closeErr = errs
}
}()
wg.Add(1)
go func() {
defer wg.Done()
for range pc.messages {
// drain
}
}()
wg.Wait()
return closeErr
}
// Errors implements the Errors method from the sarama.PartitionConsumer interface.
func (pc *PartitionConsumer) Errors() <-chan *sarama.ConsumerError {
return pc.errors
}
// Messages implements the Messages method from the sarama.PartitionConsumer interface.
func (pc *PartitionConsumer) Messages() <-chan *sarama.ConsumerMessage {
return pc.messages
}
func (pc *PartitionConsumer) HighWaterMarkOffset() int64 {
return atomic.LoadInt64(&pc.highWaterMarkOffset) + 1
}
///////////////////////////////////////////////////
// Expectation API
///////////////////////////////////////////////////
// YieldMessage will yield a message on the Messages channel of this partition consumer
// when it is consumed. By default, the mock consumer will not verify whether this
// message was consumed from the Messages channel, because there are legitimate
// reasons for this not to happen. You can call ExpectMessagesDrainedOnClose so it will
// verify that the channel is empty on close.
func (pc *PartitionConsumer) YieldMessage(msg *sarama.ConsumerMessage) {
pc.expectations <- &consumerExpectation{Msg: msg}
}
// YieldError will yield an error on the Errors channel of this partition consumer
// when it is consumed. By default, the mock consumer will not verify whether this error was
// consumed from the Errors channel, because there are legitimate reasons for this
// not to happen. You can call ExpectErrorsDrainedOnClose so it will verify that
// the channel is empty on close.
func (pc *PartitionConsumer) YieldError(err error) {
pc.expectations <- &consumerExpectation{Err: err}
}
// ExpectMessagesDrainedOnClose sets an expectation on the partition consumer
// that the messages channel will be fully drained when Close is called. If this
// expectation is not met, an error is reported to the error reporter.
func (pc *PartitionConsumer) ExpectMessagesDrainedOnClose() {
pc.messagesShouldBeDrained = true
}
// ExpectErrorsDrainedOnClose sets an expectation on the partition consumer
// that the errors channel will be fully drained when Close is called. If this
// expectation is not met, an error is reported to the error reporter.
func (pc *PartitionConsumer) ExpectErrorsDrainedOnClose() {
pc.errorsShouldBeDrained = true
}
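
Taken together, the expectation API above is typically driven from a test roughly like the sketch below; the topic name, message value, and helper name are placeholders for illustration only, based on the test code that follows.

package mocks

import (
	"testing"

	"github.com/Shopify/sarama"
)

// exampleConsumerUsage is a hypothetical test helper: register an expectation,
// yield a message, and let Close verify the drain expectation.
func exampleConsumerUsage(t *testing.T) {
	consumer := NewConsumer(t, nil)
	pcmock := consumer.ExpectConsumePartition("my_topic", 0, sarama.OffsetOldest)
	pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})
	pcmock.ExpectMessagesDrainedOnClose()

	// The code under test would normally call ConsumePartition itself.
	pc, err := consumer.ConsumePartition("my_topic", 0, sarama.OffsetOldest)
	if err != nil {
		t.Fatal(err)
	}
	if msg := <-pc.Messages(); string(msg.Value) != "hello" {
		t.Errorf("unexpected message value: %s", msg.Value)
	}
	if err := consumer.Close(); err != nil {
		t.Error(err)
	}
}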

View File

@@ -1,249 +0,0 @@
package mocks
import (
"sort"
"testing"
"github.com/Shopify/sarama"
)
func TestMockConsumerImplementsConsumerInterface(t *testing.T) {
var c interface{} = &Consumer{}
if _, ok := c.(sarama.Consumer); !ok {
t.Error("The mock consumer should implement the sarama.Consumer interface.")
}
var pc interface{} = &PartitionConsumer{}
if _, ok := pc.(sarama.PartitionConsumer); !ok {
t.Error("The mock partitionconsumer should implement the sarama.PartitionConsumer interface.")
}
}
func TestConsumerHandlesExpectations(t *testing.T) {
consumer := NewConsumer(t, nil)
defer func() {
if err := consumer.Close(); err != nil {
t.Error(err)
}
}()
consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")})
consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)
consumer.ExpectConsumePartition("test", 1, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world again")})
consumer.ExpectConsumePartition("other", 0, AnyOffset).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello other")})
pc_test0, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
if err != nil {
t.Fatal(err)
}
test0_msg := <-pc_test0.Messages()
if test0_msg.Topic != "test" || test0_msg.Partition != 0 || string(test0_msg.Value) != "hello world" {
t.Error("Message was not as expected:", test0_msg)
}
test0_err := <-pc_test0.Errors()
if test0_err.Err != sarama.ErrOutOfBrokers {
t.Error("Expected sarama.ErrOutOfBrokers, found:", test0_err.Err)
}
pc_test1, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest)
if err != nil {
t.Fatal(err)
}
test1_msg := <-pc_test1.Messages()
if test1_msg.Topic != "test" || test1_msg.Partition != 1 || string(test1_msg.Value) != "hello world again" {
t.Error("Message was not as expected:", test1_msg)
}
pc_other0, err := consumer.ConsumePartition("other", 0, sarama.OffsetNewest)
if err != nil {
t.Fatal(err)
}
other0_msg := <-pc_other0.Messages()
if other0_msg.Topic != "other" || other0_msg.Partition != 0 || string(other0_msg.Value) != "hello other" {
t.Error("Message was not as expected:", other0_msg)
}
}
func TestConsumerReturnsNonconsumedErrorsOnClose(t *testing.T) {
consumer := NewConsumer(t, nil)
consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)
consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)
pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
if err != nil {
t.Fatal(err)
}
select {
case <-pc.Messages():
t.Error("Did not epxect a message on the messages channel.")
case err := <-pc.Errors():
if err.Err != sarama.ErrOutOfBrokers {
t.Error("Expected sarama.ErrOutOfBrokers, found", err)
}
}
errs := pc.Close().(sarama.ConsumerErrors)
if len(errs) != 1 || errs[0].Err != sarama.ErrOutOfBrokers {
t.Error("Expected Close to return the remaining sarama.ErrOutOfBrokers")
}
}
func TestConsumerWithoutExpectationsOnPartition(t *testing.T) {
trm := newTestReporterMock()
consumer := NewConsumer(trm, nil)
_, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest)
if err != errOutOfExpectations {
t.Error("Expected ConsumePartition to return errOutOfExpectations")
}
if err := consumer.Close(); err != nil {
t.Error("No error expected on close, but found:", err)
}
if len(trm.errors) != 1 {
t.Errorf("Expected an expectation failure to be set on the error reporter.")
}
}
func TestConsumerWithExpectationsOnUnconsumedPartition(t *testing.T) {
trm := newTestReporterMock()
consumer := NewConsumer(trm, nil)
consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")})
if err := consumer.Close(); err != nil {
t.Error("No error expected on close, but found:", err)
}
if len(trm.errors) != 1 {
t.Errorf("Expected an expectation failure to be set on the error reporter.")
}
}
func TestConsumerWithWrongOffsetExpectation(t *testing.T) {
trm := newTestReporterMock()
consumer := NewConsumer(trm, nil)
consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)
_, err := consumer.ConsumePartition("test", 0, sarama.OffsetNewest)
if err != nil {
t.Error("Did not expect error, found:", err)
}
if len(trm.errors) != 1 {
t.Errorf("Expected an expectation failure to be set on the error reporter.")
}
if err := consumer.Close(); err != nil {
t.Error(err)
}
}
func TestConsumerViolatesMessagesDrainedExpectation(t *testing.T) {
trm := newTestReporterMock()
consumer := NewConsumer(trm, nil)
pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)
pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})
pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})
pcmock.ExpectMessagesDrainedOnClose()
pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
if err != nil {
t.Error(err)
}
// consume first message, not second one
<-pc.Messages()
if err := consumer.Close(); err != nil {
t.Error(err)
}
if len(trm.errors) != 1 {
t.Errorf("Expected an expectation failure to be set on the error reporter.")
}
}
func TestConsumerMeetsErrorsDrainedExpectation(t *testing.T) {
trm := newTestReporterMock()
consumer := NewConsumer(trm, nil)
pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)
pcmock.YieldError(sarama.ErrInvalidMessage)
pcmock.YieldError(sarama.ErrInvalidMessage)
pcmock.ExpectErrorsDrainedOnClose()
pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
if err != nil {
t.Error(err)
}
// consume first and second error,
<-pc.Errors()
<-pc.Errors()
if err := consumer.Close(); err != nil {
t.Error(err)
}
if len(trm.errors) != 0 {
t.Errorf("Expected no expectation failures to be set on the error reporter.")
}
}
func TestConsumerTopicMetadata(t *testing.T) {
trm := newTestReporterMock()
consumer := NewConsumer(trm, nil)
consumer.SetTopicMetadata(map[string][]int32{
"test1": []int32{0, 1, 2, 3},
"test2": []int32{0, 1, 2, 3, 4, 5, 6, 7},
})
topics, err := consumer.Topics()
if err != nil {
t.Error(err)
}
sortedTopics := sort.StringSlice(topics)
sortedTopics.Sort()
if len(sortedTopics) != 2 || sortedTopics[0] != "test1" || sortedTopics[1] != "test2" {
t.Error("Unexpected topics returned:", sortedTopics)
}
partitions1, err := consumer.Partitions("test1")
if err != nil {
t.Error(err)
}
if len(partitions1) != 4 {
t.Error("Unexpected partitions returned:", len(partitions1))
}
partitions2, err := consumer.Partitions("test2")
if err != nil {
t.Error(err)
}
if len(partitions2) != 8 {
t.Error("Unexpected partitions returned:", len(partitions2))
}
if len(trm.errors) != 0 {
t.Errorf("Expected no expectation failures to be set on the error reporter.")
}
}
func TestConsumerUnexpectedTopicMetadata(t *testing.T) {
trm := newTestReporterMock()
consumer := NewConsumer(trm, nil)
if _, err := consumer.Topics(); err != sarama.ErrOutOfBrokers {
t.Error("Expected sarama.ErrOutOfBrokers, found", err)
}
if len(trm.errors) != 1 {
t.Errorf("Expected an expectation failure to be set on the error reporter.")
}
}

View File

@@ -1,43 +0,0 @@
/*
Package mocks provides mocks that can be used for testing applications
that use Sarama. The mock types provided by this package implement the
interfaces Sarama exports, so you can use them for dependency injection
in your tests.
All mock instances require you to set expectations on them before you
can use them; these expectations determine how the mock will behave. If an
expectation is not met, it will make your test fail.
NOTE: this package currently does not fall under the API stability
guarantee of Sarama as it is still considered experimental.
*/
package mocks
import (
"errors"
"github.com/Shopify/sarama"
)
// ErrorReporter is a simple interface that includes the testing.T methods we use to report
// expectation violations when using the mock objects.
type ErrorReporter interface {
Errorf(string, ...interface{})
}
var (
errProduceSuccess error = nil
errOutOfExpectations = errors.New("No more expectations set on mock")
errPartitionConsumerNotStarted = errors.New("The partition consumer was never started")
)
const AnyOffset int64 = -1000
type producerExpectation struct {
Result error
}
type consumerExpectation struct {
Err error
Msg *sarama.ConsumerMessage
}

View File

@@ -1,93 +0,0 @@
package mocks
import (
"github.com/Shopify/sarama"
"sync"
)
// SyncProducer implements sarama's SyncProducer interface for testing purposes.
// Before you can use it, you have to set expectations on the mock SyncProducer
// to tell it how to handle calls to SendMessage, so you can easily test success
// and failure scenarios.
type SyncProducer struct {
l sync.Mutex
t ErrorReporter
expectations []*producerExpectation
lastOffset int64
}
// NewSyncProducer instantiates a new SyncProducer mock. The t argument should
// be the *testing.T instance of your test method. An error will be written to it if
// an expectation is violated. The config argument is currently unused, but is
// maintained to be compatible with the async Producer.
func NewSyncProducer(t ErrorReporter, config *sarama.Config) *SyncProducer {
return &SyncProducer{
t: t,
expectations: make([]*producerExpectation, 0),
}
}
////////////////////////////////////////////////
// Implement SyncProducer interface
////////////////////////////////////////////////
// SendMessage corresponds with the SendMessage method of sarama's SyncProducer implementation.
// You have to set expectations on the mock producer before calling SendMessage, so it knows
// how to handle them. If there are no remaining expectations when SendMessage is called,
// the mock producer will write an error to the test state object.
func (sp *SyncProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) {
sp.l.Lock()
defer sp.l.Unlock()
if len(sp.expectations) > 0 {
expectation := sp.expectations[0]
sp.expectations = sp.expectations[1:]
if expectation.Result == errProduceSuccess {
sp.lastOffset++
msg.Offset = sp.lastOffset
return 0, msg.Offset, nil
} else {
return -1, -1, expectation.Result
}
} else {
sp.t.Errorf("No more expectation set on this mock producer to handle the input message.")
return -1, -1, errOutOfExpectations
}
}
// Close corresponds with the Close method of sarama's SyncProducer implementation.
// By closing a mock sync producer, you also tell it that no more SendMessage calls will follow,
// so it will write an error to the test state if there are any remaining expectations.
func (sp *SyncProducer) Close() error {
sp.l.Lock()
defer sp.l.Unlock()
if len(sp.expectations) > 0 {
sp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(sp.expectations))
}
return nil
}
////////////////////////////////////////////////
// Setting expectations
////////////////////////////////////////////////
// ExpectSendMessageAndSucceed sets an expectation on the mock producer that SendMessage will be
// called. The mock producer will handle the message as if it was produced successfully, i.e. by
// returning a valid partition and offset, and a nil error.
func (sp *SyncProducer) ExpectSendMessageAndSucceed() {
sp.l.Lock()
defer sp.l.Unlock()
sp.expectations = append(sp.expectations, &producerExpectation{Result: errProduceSuccess})
}
// ExpectSendMessageAndFail sets an expectation on the mock producer that SendMessage will be
// called. The mock producer will handle the message as if it failed to produce
// successfully, i.e. by returning the provided error.
func (sp *SyncProducer) ExpectSendMessageAndFail(err error) {
sp.l.Lock()
defer sp.l.Unlock()
sp.expectations = append(sp.expectations, &producerExpectation{Result: err})
}
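
As an illustration, a test can exercise both the success and the failure expectations roughly as in the sketch below; the topic, value, and helper name are placeholders, mirroring the test file that follows.

package mocks

import (
	"testing"

	"github.com/Shopify/sarama"
)

// exampleSyncProducerUsage is a hypothetical test helper: queue one success and
// one failure expectation, send two messages, and let Close verify that every
// expectation was used.
func exampleSyncProducerUsage(t *testing.T) {
	sp := NewSyncProducer(t, nil)
	sp.ExpectSendMessageAndSucceed()
	sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)

	msg := &sarama.ProducerMessage{Topic: "my_topic", Value: sarama.StringEncoder("hello")}
	if _, _, err := sp.SendMessage(msg); err != nil {
		t.Errorf("expected the first SendMessage to succeed, got %v", err)
	}
	if _, _, err := sp.SendMessage(msg); err != sarama.ErrOutOfBrokers {
		t.Errorf("expected the second SendMessage to fail with ErrOutOfBrokers, got %v", err)
	}
	if err := sp.Close(); err != nil {
		t.Error(err)
	}
}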

View File

@@ -1,98 +0,0 @@
package mocks
import (
"testing"
"github.com/Shopify/sarama"
)
func TestMockSyncProducerImplementsSyncProducerInterface(t *testing.T) {
var mp interface{} = &SyncProducer{}
if _, ok := mp.(sarama.SyncProducer); !ok {
t.Error("The mock async producer should implement the sarama.SyncProducer interface.")
}
}
func TestSyncProducerReturnsExpectationsToSendMessage(t *testing.T) {
sp := NewSyncProducer(t, nil)
defer func() {
if err := sp.Close(); err != nil {
t.Error(err)
}
}()
sp.ExpectSendMessageAndSucceed()
sp.ExpectSendMessageAndSucceed()
sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)
msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
_, offset, err := sp.SendMessage(msg)
if err != nil {
t.Errorf("The first message should have been produced successfully, but got %s", err)
}
if offset != 1 || offset != msg.Offset {
t.Errorf("The first message should have been assigned offset 1, but got %d", msg.Offset)
}
_, offset, err = sp.SendMessage(msg)
if err != nil {
t.Errorf("The second message should have been produced successfully, but got %s", err)
}
if offset != 2 || offset != msg.Offset {
t.Errorf("The second message should have been assigned offset 2, but got %d", offset)
}
_, _, err = sp.SendMessage(msg)
if err != sarama.ErrOutOfBrokers {
t.Errorf("The third message should not have been produced successfully")
}
if err := sp.Close(); err != nil {
t.Error(err)
}
}
func TestSyncProducerWithTooManyExpectations(t *testing.T) {
trm := newTestReporterMock()
sp := NewSyncProducer(trm, nil)
sp.ExpectSendMessageAndSucceed()
sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)
msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
if _, _, err := sp.SendMessage(msg); err != nil {
t.Error("No error expected on first SendMessage call", err)
}
if err := sp.Close(); err != nil {
t.Error(err)
}
if len(trm.errors) != 1 {
t.Error("Expected to report an error")
}
}
func TestSyncProducerWithTooFewExpectations(t *testing.T) {
trm := newTestReporterMock()
sp := NewSyncProducer(trm, nil)
sp.ExpectSendMessageAndSucceed()
msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
if _, _, err := sp.SendMessage(msg); err != nil {
t.Error("No error expected on first SendMessage call", err)
}
if _, _, err := sp.SendMessage(msg); err != errOutOfExpectations {
t.Error("errOutOfExpectations expected on second SendMessage call, found:", err)
}
if err := sp.Close(); err != nil {
t.Error(err)
}
if len(trm.errors) != 1 {
t.Error("Expected to report an error")
}
}

View File

@@ -1,172 +0,0 @@
package sarama
// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
// tells the broker to set the timestamp to the time at which the request was received.
// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.
const ReceiveTime int64 = -1
type offsetCommitRequestBlock struct {
offset int64
timestamp int64
metadata string
}
func (r *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
pe.putInt64(r.offset)
if version == 1 {
pe.putInt64(r.timestamp)
} else if r.timestamp != 0 {
Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
}
return pe.putString(r.metadata)
}
func (r *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
if r.offset, err = pd.getInt64(); err != nil {
return err
}
if version == 1 {
if r.timestamp, err = pd.getInt64(); err != nil {
return err
}
}
r.metadata, err = pd.getString()
return err
}
type OffsetCommitRequest struct {
ConsumerGroup string
ConsumerGroupGeneration int32 // v1 or later
ConsumerID string // v1 or later
RetentionTime int64 // v2 or later
// Version can be:
// - 0 (kafka 0.8.1 and later)
// - 1 (kafka 0.8.2 and later)
// - 2 (kafka 0.8.3 and later)
Version int16
blocks map[string]map[int32]*offsetCommitRequestBlock
}
func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
if r.Version < 0 || r.Version > 2 {
return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
}
if err := pe.putString(r.ConsumerGroup); err != nil {
return err
}
if r.Version >= 1 {
pe.putInt32(r.ConsumerGroupGeneration)
if err := pe.putString(r.ConsumerID); err != nil {
return err
}
} else {
if r.ConsumerGroupGeneration != 0 {
Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
}
if r.ConsumerID != "" {
Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
}
}
if r.Version >= 2 {
pe.putInt64(r.RetentionTime)
} else if r.RetentionTime != 0 {
Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
}
if err := pe.putArrayLength(len(r.blocks)); err != nil {
return err
}
for topic, partitions := range r.blocks {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(partitions)); err != nil {
return err
}
for partition, block := range partitions {
pe.putInt32(partition)
if err := block.encode(pe, r.Version); err != nil {
return err
}
}
}
return nil
}
func (r *OffsetCommitRequest) decode(pd packetDecoder) (err error) {
if r.ConsumerGroup, err = pd.getString(); err != nil {
return err
}
if r.Version >= 1 {
if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
return err
}
if r.ConsumerID, err = pd.getString(); err != nil {
return err
}
}
if r.Version >= 2 {
if r.RetentionTime, err = pd.getInt64(); err != nil {
return err
}
}
topicCount, err := pd.getArrayLength()
if err != nil {
return err
}
if topicCount == 0 {
return nil
}
r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
for i := 0; i < topicCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
block := &offsetCommitRequestBlock{}
if err := block.decode(pd, r.Version); err != nil {
return err
}
r.blocks[topic][partition] = block
}
}
return nil
}
func (r *OffsetCommitRequest) key() int16 {
return 8
}
func (r *OffsetCommitRequest) version() int16 {
return r.Version
}
func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
if r.blocks == nil {
r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
}
if r.blocks[topic] == nil {
r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
}
r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
}
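
For reference, a v1 commit request might be assembled by hand roughly as in the sketch below; the group, consumer and topic names, the offset, and the helper name are placeholders, not part of this package.

package sarama

// exampleBuildOffsetCommitRequest sketches a v1 commit of offset 42 for one
// partition, using the ReceiveTime sentinel so the broker stamps the time.
func exampleBuildOffsetCommitRequest() *OffsetCommitRequest {
	req := &OffsetCommitRequest{
		Version:                 1,
		ConsumerGroup:           "my-group",
		ConsumerGroupGeneration: 1,
		ConsumerID:              "consumer-1",
	}
	req.AddBlock("my_topic", 0, 42, ReceiveTime, "")
	return req
}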

View File

@@ -1,90 +0,0 @@
package sarama
import "testing"
var (
offsetCommitRequestNoBlocksV0 = []byte{
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
0x00, 0x00, 0x00, 0x00}
offsetCommitRequestNoBlocksV1 = []byte{
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
0x00, 0x00, 0x11, 0x22,
0x00, 0x04, 'c', 'o', 'n', 's',
0x00, 0x00, 0x00, 0x00}
offsetCommitRequestNoBlocksV2 = []byte{
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
0x00, 0x00, 0x11, 0x22,
0x00, 0x04, 'c', 'o', 'n', 's',
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33,
0x00, 0x00, 0x00, 0x00}
offsetCommitRequestOneBlockV0 = []byte{
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
0x00, 0x00, 0x00, 0x01,
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x52, 0x21,
0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}
offsetCommitRequestOneBlockV1 = []byte{
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
0x00, 0x00, 0x11, 0x22,
0x00, 0x04, 'c', 'o', 'n', 's',
0x00, 0x00, 0x00, 0x01,
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x52, 0x21,
0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}
offsetCommitRequestOneBlockV2 = []byte{
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
0x00, 0x00, 0x11, 0x22,
0x00, 0x04, 'c', 'o', 'n', 's',
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33,
0x00, 0x00, 0x00, 0x01,
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x52, 0x21,
0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}
)
func TestOffsetCommitRequestV0(t *testing.T) {
request := new(OffsetCommitRequest)
request.Version = 0
request.ConsumerGroup = "foobar"
testRequest(t, "no blocks v0", request, offsetCommitRequestNoBlocksV0)
request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata")
testRequest(t, "one block v0", request, offsetCommitRequestOneBlockV0)
}
func TestOffsetCommitRequestV1(t *testing.T) {
request := new(OffsetCommitRequest)
request.ConsumerGroup = "foobar"
request.ConsumerID = "cons"
request.ConsumerGroupGeneration = 0x1122
request.Version = 1
testRequest(t, "no blocks v1", request, offsetCommitRequestNoBlocksV1)
request.AddBlock("topic", 0x5221, 0xDEADBEEF, ReceiveTime, "metadata")
testRequest(t, "one block v1", request, offsetCommitRequestOneBlockV1)
}
func TestOffsetCommitRequestV2(t *testing.T) {
request := new(OffsetCommitRequest)
request.ConsumerGroup = "foobar"
request.ConsumerID = "cons"
request.ConsumerGroupGeneration = 0x1122
request.RetentionTime = 0x4433
request.Version = 2
testRequest(t, "no blocks v2", request, offsetCommitRequestNoBlocksV2)
request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata")
testRequest(t, "one block v2", request, offsetCommitRequestOneBlockV2)
}

View File

@@ -1,73 +0,0 @@
package sarama
type OffsetCommitResponse struct {
Errors map[string]map[int32]KError
}
func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
if r.Errors == nil {
r.Errors = make(map[string]map[int32]KError)
}
partitions := r.Errors[topic]
if partitions == nil {
partitions = make(map[int32]KError)
r.Errors[topic] = partitions
}
partitions[partition] = kerror
}
func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(r.Errors)); err != nil {
return err
}
for topic, partitions := range r.Errors {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(partitions)); err != nil {
return err
}
for partition, kerror := range partitions {
pe.putInt32(partition)
pe.putInt16(int16(kerror))
}
}
return nil
}
func (r *OffsetCommitResponse) decode(pd packetDecoder) (err error) {
numTopics, err := pd.getArrayLength()
if err != nil || numTopics == 0 {
return err
}
r.Errors = make(map[string]map[int32]KError, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numErrors, err := pd.getArrayLength()
if err != nil {
return err
}
r.Errors[name] = make(map[int32]KError, numErrors)
for j := 0; j < numErrors; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
tmp, err := pd.getInt16()
if err != nil {
return err
}
r.Errors[name][id] = KError(tmp)
}
}
return nil
}

View File

@@ -1,24 +0,0 @@
package sarama
import (
"testing"
)
var (
emptyOffsetCommitResponse = []byte{
0x00, 0x00, 0x00, 0x00}
)
func TestEmptyOffsetCommitResponse(t *testing.T) {
response := OffsetCommitResponse{}
testResponse(t, "empty", &response, emptyOffsetCommitResponse)
}
func TestNormalOffsetCommitResponse(t *testing.T) {
response := OffsetCommitResponse{}
response.AddError("t", 0, ErrNotLeaderForPartition)
response.Errors["m"] = make(map[int32]KError)
// The encoded form of the response cannot be checked because it varies due to
// unpredictable map traversal order.
testResponse(t, "normal", &response, nil)
}

View File

@@ -1,71 +0,0 @@
package sarama
type OffsetFetchRequest struct {
ConsumerGroup string
Version int16
partitions map[string][]int32
}
func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
if r.Version < 0 || r.Version > 1 {
return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
}
if err = pe.putString(r.ConsumerGroup); err != nil {
return err
}
if err = pe.putArrayLength(len(r.partitions)); err != nil {
return err
}
for topic, partitions := range r.partitions {
if err = pe.putString(topic); err != nil {
return err
}
if err = pe.putInt32Array(partitions); err != nil {
return err
}
}
return nil
}
func (r *OffsetFetchRequest) decode(pd packetDecoder) (err error) {
if r.ConsumerGroup, err = pd.getString(); err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
if partitionCount == 0 {
return nil
}
r.partitions = make(map[string][]int32)
for i := 0; i < partitionCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitions, err := pd.getInt32Array()
if err != nil {
return err
}
r.partitions[topic] = partitions
}
return nil
}
func (r *OffsetFetchRequest) key() int16 {
return 9
}
func (r *OffsetFetchRequest) version() int16 {
return r.Version
}
func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
if r.partitions == nil {
r.partitions = make(map[string][]int32)
}
r.partitions[topic] = append(r.partitions[topic], partitionID)
}
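
A minimal sketch of building this request, with a placeholder group, topic, and helper name:

package sarama

// exampleBuildOffsetFetchRequest asks for the committed offsets of two
// partitions of one topic (version 0 is simply the zero value of Version).
func exampleBuildOffsetFetchRequest() *OffsetFetchRequest {
	req := &OffsetFetchRequest{ConsumerGroup: "my-group"}
	req.AddPartition("my_topic", 0)
	req.AddPartition("my_topic", 1)
	return req
}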

View File

@@ -1,31 +0,0 @@
package sarama
import "testing"
var (
offsetFetchRequestNoGroupNoPartitions = []byte{
0x00, 0x00,
0x00, 0x00, 0x00, 0x00}
offsetFetchRequestNoPartitions = []byte{
0x00, 0x04, 'b', 'l', 'a', 'h',
0x00, 0x00, 0x00, 0x00}
offsetFetchRequestOnePartition = []byte{
0x00, 0x04, 'b', 'l', 'a', 'h',
0x00, 0x00, 0x00, 0x01,
0x00, 0x0D, 't', 'o', 'p', 'i', 'c', 'T', 'h', 'e', 'F', 'i', 'r', 's', 't',
0x00, 0x00, 0x00, 0x01,
0x4F, 0x4F, 0x4F, 0x4F}
)
func TestOffsetFetchRequest(t *testing.T) {
request := new(OffsetFetchRequest)
testRequest(t, "no group, no partitions", request, offsetFetchRequestNoGroupNoPartitions)
request.ConsumerGroup = "blah"
testRequest(t, "no partitions", request, offsetFetchRequestNoPartitions)
request.AddPartition("topicTheFirst", 0x4F4F4F4F)
testRequest(t, "one partition", request, offsetFetchRequestOnePartition)
}

View File

@@ -1,131 +0,0 @@
package sarama
type OffsetFetchResponseBlock struct {
Offset int64
Metadata string
Err KError
}
func (r *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) {
r.Offset, err = pd.getInt64()
if err != nil {
return err
}
r.Metadata, err = pd.getString()
if err != nil {
return err
}
tmp, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(tmp)
return nil
}
func (r *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) {
pe.putInt64(r.Offset)
err = pe.putString(r.Metadata)
if err != nil {
return err
}
pe.putInt16(int16(r.Err))
return nil
}
type OffsetFetchResponse struct {
Blocks map[string]map[int32]*OffsetFetchResponseBlock
}
func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(r.Blocks)); err != nil {
return err
}
for topic, partitions := range r.Blocks {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(partitions)); err != nil {
return err
}
for partition, block := range partitions {
pe.putInt32(partition)
if err := block.encode(pe); err != nil {
return err
}
}
}
return nil
}
func (r *OffsetFetchResponse) decode(pd packetDecoder) (err error) {
numTopics, err := pd.getArrayLength()
if err != nil || numTopics == 0 {
return err
}
r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
if numBlocks == 0 {
r.Blocks[name] = nil
continue
}
r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
for j := 0; j < numBlocks; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
block := new(OffsetFetchResponseBlock)
err = block.decode(pd)
if err != nil {
return err
}
r.Blocks[name][id] = block
}
}
return nil
}
func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
if r.Blocks == nil {
return nil
}
if r.Blocks[topic] == nil {
return nil
}
return r.Blocks[topic][partition]
}
func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
}
partitions := r.Blocks[topic]
if partitions == nil {
partitions = make(map[int32]*OffsetFetchResponseBlock)
r.Blocks[topic] = partitions
}
partitions[partition] = block
}

View File

@@ -1,22 +0,0 @@
package sarama
import "testing"
var (
emptyOffsetFetchResponse = []byte{
0x00, 0x00, 0x00, 0x00}
)
func TestEmptyOffsetFetchResponse(t *testing.T) {
response := OffsetFetchResponse{}
testResponse(t, "empty", &response, emptyOffsetFetchResponse)
}
func TestNormalOffsetFetchResponse(t *testing.T) {
response := OffsetFetchResponse{}
response.AddBlock("t", 0, &OffsetFetchResponseBlock{0, "md", ErrRequestTimedOut})
response.Blocks["m"] = nil
// The encoded form of the response cannot be checked because it varies due to
// unpredictable map traversal order.
testResponse(t, "normal", &response, nil)
}

View File

@@ -1,113 +0,0 @@
package sarama
type offsetRequestBlock struct {
time int64
maxOffsets int32
}
func (r *offsetRequestBlock) encode(pe packetEncoder) error {
pe.putInt64(int64(r.time))
pe.putInt32(r.maxOffsets)
return nil
}
func (r *offsetRequestBlock) decode(pd packetDecoder) (err error) {
if r.time, err = pd.getInt64(); err != nil {
return err
}
if r.maxOffsets, err = pd.getInt32(); err != nil {
return err
}
return nil
}
type OffsetRequest struct {
blocks map[string]map[int32]*offsetRequestBlock
}
func (r *OffsetRequest) encode(pe packetEncoder) error {
pe.putInt32(-1) // replica ID is always -1 for clients
err := pe.putArrayLength(len(r.blocks))
if err != nil {
return err
}
for topic, partitions := range r.blocks {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(partitions))
if err != nil {
return err
}
for partition, block := range partitions {
pe.putInt32(partition)
if err = block.encode(pe); err != nil {
return err
}
}
}
return nil
}
func (r *OffsetRequest) decode(pd packetDecoder) error {
// Ignore replica ID
if _, err := pd.getInt32(); err != nil {
return err
}
blockCount, err := pd.getArrayLength()
if err != nil {
return err
}
if blockCount == 0 {
return nil
}
r.blocks = make(map[string]map[int32]*offsetRequestBlock)
for i := 0; i < blockCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
r.blocks[topic] = make(map[int32]*offsetRequestBlock)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
block := &offsetRequestBlock{}
if err := block.decode(pd); err != nil {
return err
}
r.blocks[topic][partition] = block
}
}
return nil
}
func (r *OffsetRequest) key() int16 {
return 2
}
func (r *OffsetRequest) version() int16 {
return 0
}
func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) {
if r.blocks == nil {
r.blocks = make(map[string]map[int32]*offsetRequestBlock)
}
if r.blocks[topic] == nil {
r.blocks[topic] = make(map[int32]*offsetRequestBlock)
}
tmp := new(offsetRequestBlock)
tmp.time = time
tmp.maxOffsets = maxOffsets
r.blocks[topic][partitionID] = tmp
}
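
As a sketch, a request for the most recent offset of a single partition could be built as below; OffsetNewest is assumed to be the sentinel time constant this package exposes for that purpose, and the topic and helper name are placeholders.

package sarama

// exampleBuildOffsetRequest asks for at most one offset, at the "newest"
// sentinel time, for partition 0 of a placeholder topic.
func exampleBuildOffsetRequest() *OffsetRequest {
	req := &OffsetRequest{}
	req.AddBlock("my_topic", 0, OffsetNewest, 1)
	return req
}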

View File

@@ -1,26 +0,0 @@
package sarama
import "testing"
var (
offsetRequestNoBlocks = []byte{
0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x00}
offsetRequestOneBlock = []byte{
0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x01,
0x00, 0x03, 'f', 'o', 'o',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x04,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x02}
)
func TestOffsetRequest(t *testing.T) {
request := new(OffsetRequest)
testRequest(t, "no blocks", request, offsetRequestNoBlocks)
request.AddBlock("foo", 4, 1, 2)
testRequest(t, "one block", request, offsetRequestOneBlock)
}

View File

@@ -1,130 +0,0 @@
package sarama
type OffsetResponseBlock struct {
Err KError
Offsets []int64
}
func (r *OffsetResponseBlock) decode(pd packetDecoder) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(tmp)
r.Offsets, err = pd.getInt64Array()
return err
}
func (r *OffsetResponseBlock) encode(pe packetEncoder) (err error) {
pe.putInt16(int16(r.Err))
return pe.putInt64Array(r.Offsets)
}
type OffsetResponse struct {
Blocks map[string]map[int32]*OffsetResponseBlock
}
func (r *OffsetResponse) decode(pd packetDecoder) (err error) {
numTopics, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
for j := 0; j < numBlocks; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
block := new(OffsetResponseBlock)
err = block.decode(pd)
if err != nil {
return err
}
r.Blocks[name][id] = block
}
}
return nil
}
func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
if r.Blocks == nil {
return nil
}
if r.Blocks[topic] == nil {
return nil
}
return r.Blocks[topic][partition]
}
func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
if err = pe.putArrayLength(len(r.Blocks)); err != nil {
return err
}
for topic, partitions := range r.Blocks {
if err = pe.putString(topic); err != nil {
return err
}
if err = pe.putArrayLength(len(partitions)); err != nil {
return err
}
for partition, block := range partitions {
pe.putInt32(partition)
if err = block.encode(pe); err != nil {
return err
}
}
}
return nil
}
// testing API
func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
}
byTopic, ok := r.Blocks[topic]
if !ok {
byTopic = make(map[int32]*OffsetResponseBlock)
r.Blocks[topic] = byTopic
}
byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}}
}

View File

@@ -1,62 +0,0 @@
package sarama
import "testing"
var (
emptyOffsetResponse = []byte{
0x00, 0x00, 0x00, 0x00}
normalOffsetResponse = []byte{
0x00, 0x00, 0x00, 0x02,
0x00, 0x01, 'a',
0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 'z',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06}
)
func TestEmptyOffsetResponse(t *testing.T) {
response := OffsetResponse{}
testDecodable(t, "empty", &response, emptyOffsetResponse)
if len(response.Blocks) != 0 {
t.Error("Decoding produced", len(response.Blocks), "topics where there were none.")
}
}
func TestNormalOffsetResponse(t *testing.T) {
response := OffsetResponse{}
testDecodable(t, "normal", &response, normalOffsetResponse)
if len(response.Blocks) != 2 {
t.Fatal("Decoding produced", len(response.Blocks), "topics where there were two.")
}
if len(response.Blocks["a"]) != 0 {
t.Fatal("Decoding produced", len(response.Blocks["a"]), "partitions for topic 'a' where there were none.")
}
if len(response.Blocks["z"]) != 1 {
t.Fatal("Decoding produced", len(response.Blocks["z"]), "partitions for topic 'z' where there was one.")
}
if response.Blocks["z"][2].Err != ErrNoError {
t.Fatal("Decoding produced invalid error for topic z partition 2.")
}
if len(response.Blocks["z"][2].Offsets) != 2 {
t.Fatal("Decoding produced invalid number of offsets for topic z partition 2.")
}
if response.Blocks["z"][2].Offsets[0] != 5 || response.Blocks["z"][2].Offsets[1] != 6 {
t.Fatal("Decoding produced invalid offsets for topic z partition 2.")
}
}

View File

@@ -1,44 +0,0 @@
package sarama
// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
// Types implementing Decoder only need to worry about calling methods like GetString,
// not about how a string is represented in Kafka.
type packetDecoder interface {
// Primitives
getInt8() (int8, error)
getInt16() (int16, error)
getInt32() (int32, error)
getInt64() (int64, error)
getArrayLength() (int, error)
// Collections
getBytes() ([]byte, error)
getString() (string, error)
getInt32Array() ([]int32, error)
getInt64Array() ([]int64, error)
// Subsets
remaining() int
getSubset(length int) (packetDecoder, error)
// Stacks, see PushDecoder
push(in pushDecoder) error
pop() error
}
// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
// depend upon have been decoded.
type pushDecoder interface {
// Saves the offset into the input buffer as the location to actually read the calculated value when able.
saveOffset(in int)
// Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32).
reserveLength() int
// Indicates that all required data is now available to calculate and check the field.
// SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
// of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
check(curOffset int, buf []byte) error
}
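
To make the push/pop contract concrete, a hypothetical pushDecoder for a CRC32 prefix might look like the sketch below; the type name is invented and this is not the field implementation the package actually uses.

package sarama

import (
	"encoding/binary"
	"hash/crc32"
)

// exampleCRC32Field validates a 4-byte CRC32 prefix once all the bytes it
// covers have been decoded (i.e. when the decoder pops it).
type exampleCRC32Field struct {
	startOffset int
}

func (c *exampleCRC32Field) saveOffset(in int) {
	c.startOffset = in
}

func (c *exampleCRC32Field) reserveLength() int {
	return 4 // a CRC32 occupies four bytes
}

func (c *exampleCRC32Field) check(curOffset int, buf []byte) error {
	// Checksum everything between the reserved prefix and the current offset,
	// then compare it with the value stored in the prefix itself.
	expected := binary.BigEndian.Uint32(buf[c.startOffset:])
	if crc32.ChecksumIEEE(buf[c.startOffset+4:curOffset]) != expected {
		return PacketDecodingError{"CRC didn't match"}
	}
	return nil
}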

View File

@@ -1,41 +0,0 @@
package sarama
// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
// Types implementing Encoder only need to worry about calling methods like PutString,
// not about how a string is represented in Kafka.
type packetEncoder interface {
// Primitives
putInt8(in int8)
putInt16(in int16)
putInt32(in int32)
putInt64(in int64)
putArrayLength(in int) error
// Collections
putBytes(in []byte) error
putRawBytes(in []byte) error
putString(in string) error
putInt32Array(in []int32) error
putInt64Array(in []int64) error
// Stacks, see PushEncoder
push(in pushEncoder)
pop() error
}
// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
// depend upon have been written.
type pushEncoder interface {
// Saves the offset into the input buffer as the location to actually write the calculated value when able.
saveOffset(in int)
// Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32).
reserveLength() int
// Indicates that all required data is now available to calculate and write the field.
// SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
// of data to the saved offset, based on the data between the saved offset and curOffset.
run(curOffset int, buf []byte) error
}
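
Similarly, a hypothetical pushEncoder that back-fills a 4-byte length prefix could look like this sketch; it only illustrates the contract and is not the lengthField used elsewhere in the package.

package sarama

import "encoding/binary"

// exampleLengthPrefix reserves four bytes for a length, then writes the number
// of bytes encoded after the prefix once the encoder pops it.
type exampleLengthPrefix struct {
	startOffset int
}

func (l *exampleLengthPrefix) saveOffset(in int) {
	l.startOffset = in
}

func (l *exampleLengthPrefix) reserveLength() int {
	return 4 // room for an int32 length
}

func (l *exampleLengthPrefix) run(curOffset int, buf []byte) error {
	// The value is the number of bytes written after the reserved prefix.
	binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
	return nil
}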

View File

@@ -1,120 +0,0 @@
package sarama
import (
"hash"
"hash/fnv"
"math/rand"
"time"
)
// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
// as simple default implementations.
type Partitioner interface {
Partition(message *ProducerMessage, numPartitions int32) (int32, error) // Partition takes a message and partition count and chooses a partition
// RequiresConsistency indicates to the user of the partitioner whether the mapping of key->partition is consistent or not.
// Specifically, if a partitioner requires consistency then it must be allowed to choose from all partitions (even ones known to
// be unavailable), and its choice must be respected by the caller. The obvious example is the HashPartitioner.
RequiresConsistency() bool
}
// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
type PartitionerConstructor func(topic string) Partitioner
type manualPartitioner struct{}
// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
// ProducerMessage's Partition field as the partition to produce to.
func NewManualPartitioner(topic string) Partitioner {
return new(manualPartitioner)
}
func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
return message.Partition, nil
}
func (p *manualPartitioner) RequiresConsistency() bool {
return true
}
type randomPartitioner struct {
generator *rand.Rand
}
// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
func NewRandomPartitioner(topic string) Partitioner {
p := new(randomPartitioner)
p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
return p
}
func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
return int32(p.generator.Intn(int(numPartitions))), nil
}
func (p *randomPartitioner) RequiresConsistency() bool {
return false
}
type roundRobinPartitioner struct {
partition int32
}
// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
func NewRoundRobinPartitioner(topic string) Partitioner {
return &roundRobinPartitioner{}
}
func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
if p.partition >= numPartitions {
p.partition = 0
}
ret := p.partition
p.partition++
return ret, nil
}
func (p *roundRobinPartitioner) RequiresConsistency() bool {
return false
}
type hashPartitioner struct {
random Partitioner
hasher hash.Hash32
}
// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil, or fails to
// encode, then a random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key
// is used, modulo the number of partitions. This ensures that messages with the same key always end up on the
// same partition.
func NewHashPartitioner(topic string) Partitioner {
p := new(hashPartitioner)
p.random = NewRandomPartitioner(topic)
p.hasher = fnv.New32a()
return p
}
func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
if message.Key == nil {
return p.random.Partition(message, numPartitions)
}
bytes, err := message.Key.Encode()
if err != nil {
return -1, err
}
p.hasher.Reset()
_, err = p.hasher.Write(bytes)
if err != nil {
return -1, err
}
hash := int32(p.hasher.Sum32())
if hash < 0 {
hash = -hash
}
return hash % numPartitions, nil
}
func (p *hashPartitioner) RequiresConsistency() bool {
return true
}

View File

@@ -1,198 +0,0 @@
package sarama
import (
"crypto/rand"
"log"
"testing"
)
func assertPartitioningConsistent(t *testing.T, partitioner Partitioner, message *ProducerMessage, numPartitions int32) {
choice, err := partitioner.Partition(message, numPartitions)
if err != nil {
t.Error(partitioner, err)
}
if choice < 0 || choice >= numPartitions {
t.Error(partitioner, "returned partition", choice, "outside of range for", message)
}
for i := 1; i < 50; i++ {
newChoice, err := partitioner.Partition(message, numPartitions)
if err != nil {
t.Error(partitioner, err)
}
if newChoice != choice {
t.Error(partitioner, "returned partition", newChoice, "inconsistent with", choice, ".")
}
}
}
func TestRandomPartitioner(t *testing.T) {
partitioner := NewRandomPartitioner("mytopic")
choice, err := partitioner.Partition(nil, 1)
if err != nil {
t.Error(partitioner, err)
}
if choice != 0 {
t.Error("Returned non-zero partition when only one available.")
}
for i := 1; i < 50; i++ {
choice, err := partitioner.Partition(nil, 50)
if err != nil {
t.Error(partitioner, err)
}
if choice < 0 || choice >= 50 {
t.Error("Returned partition", choice, "outside of range.")
}
}
}
func TestRoundRobinPartitioner(t *testing.T) {
partitioner := NewRoundRobinPartitioner("mytopic")
choice, err := partitioner.Partition(nil, 1)
if err != nil {
t.Error(partitioner, err)
}
if choice != 0 {
t.Error("Returned non-zero partition when only one available.")
}
var i int32
for i = 1; i < 50; i++ {
choice, err := partitioner.Partition(nil, 7)
if err != nil {
t.Error(partitioner, err)
}
if choice != i%7 {
t.Error("Returned partition", choice, "expecting", i%7)
}
}
}
func TestHashPartitioner(t *testing.T) {
partitioner := NewHashPartitioner("mytopic")
choice, err := partitioner.Partition(&ProducerMessage{}, 1)
if err != nil {
t.Error(partitioner, err)
}
if choice != 0 {
t.Error("Returned non-zero partition when only one available.")
}
for i := 1; i < 50; i++ {
choice, err := partitioner.Partition(&ProducerMessage{}, 50)
if err != nil {
t.Error(partitioner, err)
}
if choice < 0 || choice >= 50 {
t.Error("Returned partition", choice, "outside of range for nil key.")
}
}
buf := make([]byte, 256)
for i := 1; i < 50; i++ {
if _, err := rand.Read(buf); err != nil {
t.Error(err)
}
assertPartitioningConsistent(t, partitioner, &ProducerMessage{Key: ByteEncoder(buf)}, 50)
}
}
func TestManualPartitioner(t *testing.T) {
partitioner := NewManualPartitioner("mytopic")
choice, err := partitioner.Partition(&ProducerMessage{}, 1)
if err != nil {
t.Error(partitioner, err)
}
if choice != 0 {
t.Error("Returned non-zero partition when only one available.")
}
for i := int32(1); i < 50; i++ {
choice, err := partitioner.Partition(&ProducerMessage{Partition: i}, 50)
if err != nil {
t.Error(partitioner, err)
}
if choice != i {
t.Error("Returned partition not the same as the input partition")
}
}
}
// By default, Sarama uses the message's key to consistently assign a partition to
// a message using hashing. If no key is set, a random partition will be chosen.
// This example shows how you can partition messages randomly, even when a key is set,
// by overriding Config.Producer.Partitioner.
func ExamplePartitioner_random() {
config := NewConfig()
config.Producer.Partitioner = NewRandomPartitioner
producer, err := NewSyncProducer([]string{"localhost:9092"}, config)
if err != nil {
log.Fatal(err)
}
defer func() {
if err := producer.Close(); err != nil {
log.Println("Failed to close producer:", err)
}
}()
msg := &ProducerMessage{Topic: "test", Key: StringEncoder("key is set"), Value: StringEncoder("test")}
partition, offset, err := producer.SendMessage(msg)
if err != nil {
log.Fatalln("Failed to produce message to kafka cluster.")
}
log.Printf("Produced message to partition %d with offset %d", partition, offset)
}
// This example shows how to assign partitions to your messages manually.
func ExamplePartitioner_manual() {
config := NewConfig()
// First, we tell the producer that we are going to partition ourselves.
config.Producer.Partitioner = NewManualPartitioner
producer, err := NewSyncProducer([]string{"localhost:9092"}, config)
if err != nil {
log.Fatal(err)
}
defer func() {
if err := producer.Close(); err != nil {
log.Println("Failed to close producer:", err)
}
}()
// Now, we set the Partition field of the ProducerMessage struct.
msg := &ProducerMessage{Topic: "test", Partition: 6, Value: StringEncoder("test")}
partition, offset, err := producer.SendMessage(msg)
if err != nil {
log.Fatalln("Failed to produce message to kafka cluster.")
}
if partition != 6 {
log.Fatal("Message should have been produced to partition 6!")
}
log.Printf("Produced message to partition %d with offset %d", partition, offset)
}
// This example shows how to set a different partitioner depending on the topic.
func ExamplePartitioner_per_topic() {
config := NewConfig()
config.Producer.Partitioner = func(topic string) Partitioner {
switch topic {
case "access_log", "error_log":
return NewRandomPartitioner(topic)
default:
return NewHashPartitioner(topic)
}
}
// ...
}

View File

@@ -1,95 +0,0 @@
package sarama
import (
"fmt"
"math"
)
type prepEncoder struct {
length int
}
// primitives
func (pe *prepEncoder) putInt8(in int8) {
pe.length += 1
}
func (pe *prepEncoder) putInt16(in int16) {
pe.length += 2
}
func (pe *prepEncoder) putInt32(in int32) {
pe.length += 4
}
func (pe *prepEncoder) putInt64(in int64) {
pe.length += 8
}
func (pe *prepEncoder) putArrayLength(in int) error {
if in > math.MaxInt32 {
return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
}
pe.length += 4
return nil
}
// arrays
func (pe *prepEncoder) putBytes(in []byte) error {
pe.length += 4
if in == nil {
return nil
}
if len(in) > math.MaxInt32 {
return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
}
pe.length += len(in)
return nil
}
func (pe *prepEncoder) putRawBytes(in []byte) error {
if len(in) > math.MaxInt32 {
return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
}
pe.length += len(in)
return nil
}
func (pe *prepEncoder) putString(in string) error {
pe.length += 2
if len(in) > math.MaxInt16 {
return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
}
pe.length += len(in)
return nil
}
func (pe *prepEncoder) putInt32Array(in []int32) error {
err := pe.putArrayLength(len(in))
if err != nil {
return err
}
pe.length += 4 * len(in)
return nil
}
func (pe *prepEncoder) putInt64Array(in []int64) error {
err := pe.putArrayLength(len(in))
if err != nil {
return err
}
pe.length += 8 * len(in)
return nil
}
// stackable
func (pe *prepEncoder) push(in pushEncoder) {
pe.length += in.reserveLength()
}
func (pe *prepEncoder) pop() error {
return nil
}

View File

@@ -1,148 +0,0 @@
package sarama
// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
// it must see before responding. Any of the constants defined here are valid. On broker versions
// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced
// by setting the `min.isr` value in the broker's configuration).
type RequiredAcks int16
const (
// NoResponse doesn't send any response, the TCP ACK is all you get.
NoResponse RequiredAcks = 0
// WaitForLocal waits for only the local commit to succeed before responding.
WaitForLocal RequiredAcks = 1
// WaitForAll waits for all replicas to commit before responding.
WaitForAll RequiredAcks = -1
)
type ProduceRequest struct {
RequiredAcks RequiredAcks
Timeout int32
msgSets map[string]map[int32]*MessageSet
}
func (p *ProduceRequest) encode(pe packetEncoder) error {
pe.putInt16(int16(p.RequiredAcks))
pe.putInt32(p.Timeout)
err := pe.putArrayLength(len(p.msgSets))
if err != nil {
return err
}
for topic, partitions := range p.msgSets {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(partitions))
if err != nil {
return err
}
for id, msgSet := range partitions {
pe.putInt32(id)
pe.push(&lengthField{})
err = msgSet.encode(pe)
if err != nil {
return err
}
err = pe.pop()
if err != nil {
return err
}
}
}
return nil
}
func (p *ProduceRequest) decode(pd packetDecoder) error {
requiredAcks, err := pd.getInt16()
if err != nil {
return err
}
p.RequiredAcks = RequiredAcks(requiredAcks)
if p.Timeout, err = pd.getInt32(); err != nil {
return err
}
topicCount, err := pd.getArrayLength()
if err != nil {
return err
}
if topicCount == 0 {
return nil
}
p.msgSets = make(map[string]map[int32]*MessageSet)
for i := 0; i < topicCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
p.msgSets[topic] = make(map[int32]*MessageSet)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
messageSetSize, err := pd.getInt32()
if err != nil {
return err
}
if messageSetSize == 0 {
continue
}
msgSetDecoder, err := pd.getSubset(int(messageSetSize))
if err != nil {
return err
}
msgSet := &MessageSet{}
err = msgSet.decode(msgSetDecoder)
if err != nil {
return err
}
p.msgSets[topic][partition] = msgSet
}
}
return nil
}
func (p *ProduceRequest) key() int16 {
return 0
}
func (p *ProduceRequest) version() int16 {
return 0
}
func (p *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
if p.msgSets == nil {
p.msgSets = make(map[string]map[int32]*MessageSet)
}
if p.msgSets[topic] == nil {
p.msgSets[topic] = make(map[int32]*MessageSet)
}
set := p.msgSets[topic][partition]
if set == nil {
set = new(MessageSet)
p.msgSets[topic][partition] = set
}
set.addMessage(msg)
}
func (p *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
if p.msgSets == nil {
p.msgSets = make(map[string]map[int32]*MessageSet)
}
if p.msgSets[topic] == nil {
p.msgSets[topic] = make(map[int32]*MessageSet)
}
p.msgSets[topic][partition] = set
}
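
As an illustrative sketch, a ProduceRequest could be assembled by hand as below (in normal use the producer builds these internally); the topic, partition, payload, and helper name are placeholders.

package sarama

// exampleBuildProduceRequest waits for the full ISR to acknowledge and gives
// the broker ten seconds to do so before timing out.
func exampleBuildProduceRequest() *ProduceRequest {
	req := &ProduceRequest{
		RequiredAcks: WaitForAll,
		Timeout:      10000, // milliseconds, enforced broker-side
	}
	req.AddMessage("my_topic", 0, &Message{
		Codec: CompressionNone,
		Value: []byte("hello"),
	})
	return req
}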

View File

@@ -1,47 +0,0 @@
package sarama
import (
"testing"
)
var (
produceRequestEmpty = []byte{
0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00}
produceRequestHeader = []byte{
0x01, 0x23,
0x00, 0x00, 0x04, 0x44,
0x00, 0x00, 0x00, 0x00}
produceRequestOneMessage = []byte{
0x01, 0x23,
0x00, 0x00, 0x04, 0x44,
0x00, 0x00, 0x00, 0x01,
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0xAD,
0x00, 0x00, 0x00, 0x1C,
// messageSet
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x10,
// message
0x23, 0x96, 0x4a, 0xf7, // CRC
0x00,
0x00,
0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x02, 0x00, 0xEE}
)
func TestProduceRequest(t *testing.T) {
request := new(ProduceRequest)
testRequest(t, "empty", request, produceRequestEmpty)
request.RequiredAcks = 0x123
request.Timeout = 0x444
testRequest(t, "header", request, produceRequestHeader)
request.AddMessage("topic", 0xAD, &Message{Codec: CompressionNone, Key: nil, Value: []byte{0x00, 0xEE}})
testRequest(t, "one message", request, produceRequestOneMessage)
}

View File

@@ -1,112 +0,0 @@
package sarama
type ProduceResponseBlock struct {
Err KError
Offset int64
}
func (pr *ProduceResponseBlock) decode(pd packetDecoder) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
pr.Err = KError(tmp)
pr.Offset, err = pd.getInt64()
if err != nil {
return err
}
return nil
}
type ProduceResponse struct {
Blocks map[string]map[int32]*ProduceResponseBlock
}
func (pr *ProduceResponse) decode(pd packetDecoder) (err error) {
numTopics, err := pd.getArrayLength()
if err != nil {
return err
}
pr.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
pr.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
for j := 0; j < numBlocks; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
block := new(ProduceResponseBlock)
err = block.decode(pd)
if err != nil {
return err
}
pr.Blocks[name][id] = block
}
}
return nil
}
func (pr *ProduceResponse) encode(pe packetEncoder) error {
err := pe.putArrayLength(len(pr.Blocks))
if err != nil {
return err
}
for topic, partitions := range pr.Blocks {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(partitions))
if err != nil {
return err
}
for id, prb := range partitions {
pe.putInt32(id)
pe.putInt16(int16(prb.Err))
pe.putInt64(prb.Offset)
}
}
return nil
}
func (pr *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
if pr.Blocks == nil {
return nil
}
if pr.Blocks[topic] == nil {
return nil
}
return pr.Blocks[topic][partition]
}
// Testing API
func (pr *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
if pr.Blocks == nil {
pr.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
}
byTopic, ok := pr.Blocks[topic]
if !ok {
byTopic = make(map[int32]*ProduceResponseBlock)
pr.Blocks[topic] = byTopic
}
byTopic[partition] = &ProduceResponseBlock{Err: err}
}

View File

@@ -1,67 +0,0 @@
package sarama
import "testing"
var (
produceResponseNoBlocks = []byte{
0x00, 0x00, 0x00, 0x00}
produceResponseManyBlocks = []byte{
0x00, 0x00, 0x00, 0x02,
0x00, 0x03, 'f', 'o', 'o',
0x00, 0x00, 0x00, 0x00,
0x00, 0x03, 'b', 'a', 'r',
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
0x00, 0x00, 0x00, 0x02,
0x00, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
)
func TestProduceResponse(t *testing.T) {
response := ProduceResponse{}
testDecodable(t, "no blocks", &response, produceResponseNoBlocks)
if len(response.Blocks) != 0 {
t.Error("Decoding produced", len(response.Blocks), "topics where there were none")
}
testDecodable(t, "many blocks", &response, produceResponseManyBlocks)
if len(response.Blocks) != 2 {
t.Error("Decoding produced", len(response.Blocks), "topics where there were 2")
}
if len(response.Blocks["foo"]) != 0 {
t.Error("Decoding produced", len(response.Blocks["foo"]), "partitions for 'foo' where there were none")
}
if len(response.Blocks["bar"]) != 2 {
t.Error("Decoding produced", len(response.Blocks["bar"]), "partitions for 'bar' where there were two")
}
block := response.GetBlock("bar", 1)
if block == nil {
t.Error("Decoding did not produce a block for bar/1")
} else {
if block.Err != ErrNoError {
t.Error("Decoding failed for bar/1/Err, got:", int16(block.Err))
}
if block.Offset != 0xFF {
t.Error("Decoding failed for bar/1/Offset, got:", block.Offset)
}
}
block = response.GetBlock("bar", 2)
if block == nil {
t.Error("Decoding did not produce a block for bar/2")
} else {
if block.Err != ErrInvalidMessage {
t.Error("Decoding failed for bar/2/Err, got:", int16(block.Err))
}
if block.Offset != 0 {
t.Error("Decoding failed for bar/2/Offset, got:", block.Offset)
}
}
}

View File

@@ -1,225 +0,0 @@
package sarama
import (
"encoding/binary"
"math"
)
type realDecoder struct {
raw []byte
off int
stack []pushDecoder
}
// primitives
func (rd *realDecoder) getInt8() (int8, error) {
if rd.remaining() < 1 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int8(rd.raw[rd.off])
rd.off += 1
return tmp, nil
}
func (rd *realDecoder) getInt16() (int16, error) {
if rd.remaining() < 2 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
rd.off += 2
return tmp, nil
}
func (rd *realDecoder) getInt32() (int32, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
return tmp, nil
}
func (rd *realDecoder) getInt64() (int64, error) {
if rd.remaining() < 8 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
rd.off += 8
return tmp, nil
}
func (rd *realDecoder) getArrayLength() (int, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
if tmp > rd.remaining() {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
} else if tmp > 2*math.MaxUint16 {
return -1, PacketDecodingError{"invalid array length"}
}
return tmp, nil
}
// collections
func (rd *realDecoder) getBytes() ([]byte, error) {
tmp, err := rd.getInt32()
if err != nil {
return nil, err
}
n := int(tmp)
switch {
case n < -1:
return nil, PacketDecodingError{"invalid byteslice length"}
case n == -1:
return nil, nil
case n == 0:
return make([]byte, 0), nil
case n > rd.remaining():
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
tmpStr := rd.raw[rd.off : rd.off+n]
rd.off += n
return tmpStr, nil
}
func (rd *realDecoder) getString() (string, error) {
tmp, err := rd.getInt16()
if err != nil {
return "", err
}
n := int(tmp)
switch {
case n < -1:
return "", PacketDecodingError{"invalid string length"}
case n == -1:
return "", nil
case n == 0:
return "", nil
case n > rd.remaining():
rd.off = len(rd.raw)
return "", ErrInsufficientData
}
tmpStr := string(rd.raw[rd.off : rd.off+n])
rd.off += n
return tmpStr, nil
}
func (rd *realDecoder) getInt32Array() ([]int32, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
if rd.remaining() < 4*n {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
if n == 0 {
return nil, nil
}
if n < 0 {
return nil, PacketDecodingError{"invalid array length"}
}
ret := make([]int32, n)
for i := range ret {
ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
}
return ret, nil
}
func (rd *realDecoder) getInt64Array() ([]int64, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
if rd.remaining() < 8*n {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
if n == 0 {
return nil, nil
}
if n < 0 {
return nil, PacketDecodingError{"invalid array length"}
}
ret := make([]int64, n)
for i := range ret {
ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
rd.off += 8
}
return ret, nil
}
// subsets
func (rd *realDecoder) remaining() int {
return len(rd.raw) - rd.off
}
func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
if length > rd.remaining() {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
start := rd.off
rd.off += length
return &realDecoder{raw: rd.raw[start:rd.off]}, nil
}
// stacks
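// A descriptive note on the push/pop pattern below: push reserves space for a
// field (such as a CRC or length) whose value can only be verified after the
// enclosed data has been decoded; pop hands the current offset back to the
// pushDecoder so it can check the reserved field against the bytes consumed
// since the saved offset.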
func (rd *realDecoder) push(in pushDecoder) error {
in.saveOffset(rd.off)
reserve := in.reserveLength()
if rd.remaining() < reserve {
rd.off = len(rd.raw)
return ErrInsufficientData
}
rd.stack = append(rd.stack, in)
rd.off += reserve
return nil
}
func (rd *realDecoder) pop() error {
// this is go's ugly pop pattern (the inverse of append)
in := rd.stack[len(rd.stack)-1]
rd.stack = rd.stack[:len(rd.stack)-1]
return in.check(rd.off, rd.raw)
}

View File

@@ -1,100 +0,0 @@
package sarama
import "encoding/binary"
type realEncoder struct {
raw []byte
off int
stack []pushEncoder
}
// primitives
func (re *realEncoder) putInt8(in int8) {
re.raw[re.off] = byte(in)
re.off += 1
}
func (re *realEncoder) putInt16(in int16) {
binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
re.off += 2
}
func (re *realEncoder) putInt32(in int32) {
binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
re.off += 4
}
func (re *realEncoder) putInt64(in int64) {
binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
re.off += 8
}
func (re *realEncoder) putArrayLength(in int) error {
re.putInt32(int32(in))
return nil
}
// collections

func (re *realEncoder) putRawBytes(in []byte) error {
copy(re.raw[re.off:], in)
re.off += len(in)
return nil
}
func (re *realEncoder) putBytes(in []byte) error {
if in == nil {
re.putInt32(-1)
return nil
}
re.putInt32(int32(len(in)))
copy(re.raw[re.off:], in)
re.off += len(in)
return nil
}
func (re *realEncoder) putString(in string) error {
re.putInt16(int16(len(in)))
copy(re.raw[re.off:], in)
re.off += len(in)
return nil
}
func (re *realEncoder) putInt32Array(in []int32) error {
err := re.putArrayLength(len(in))
if err != nil {
return err
}
for _, val := range in {
re.putInt32(val)
}
return nil
}
func (re *realEncoder) putInt64Array(in []int64) error {
err := re.putArrayLength(len(in))
if err != nil {
return err
}
for _, val := range in {
re.putInt64(val)
}
return nil
}
// stacks
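// A descriptive note on the push/pop pattern below: push skips over space for a
// field (such as a length or CRC) that cannot be known until the enclosed data
// has been written; pop hands the current offset to the pushEncoder so it can
// backfill the reserved bytes.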
func (re *realEncoder) push(in pushEncoder) {
in.saveOffset(re.off)
re.off += in.reserveLength()
re.stack = append(re.stack, in)
}
func (re *realEncoder) pop() error {
// this is go's ugly pop pattern (the inverse of append)
in := re.stack[len(re.stack)-1]
re.stack = re.stack[:len(re.stack)-1]
return in.run(re.off, re.raw)
}

View File

@@ -1,100 +0,0 @@
package sarama
import (
"encoding/binary"
"fmt"
"io"
)
type requestBody interface {
encoder
decoder
key() int16
version() int16
}
type request struct {
correlationID int32
clientID string
body requestBody
}
func (r *request) encode(pe packetEncoder) (err error) {
pe.push(&lengthField{})
pe.putInt16(r.body.key())
pe.putInt16(r.body.version())
pe.putInt32(r.correlationID)
err = pe.putString(r.clientID)
if err != nil {
return err
}
err = r.body.encode(pe)
if err != nil {
return err
}
return pe.pop()
}
func (r *request) decode(pd packetDecoder) (err error) {
var key int16
if key, err = pd.getInt16(); err != nil {
return err
}
var version int16
if version, err = pd.getInt16(); err != nil {
return err
}
if r.correlationID, err = pd.getInt32(); err != nil {
return err
}
r.clientID, err = pd.getString()
r.body = allocateBody(key, version)
if r.body == nil {
return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
}
return r.body.decode(pd)
}
func decodeRequest(r io.Reader) (req *request, err error) {
lengthBytes := make([]byte, 4)
if _, err := io.ReadFull(r, lengthBytes); err != nil {
return nil, err
}
length := int32(binary.BigEndian.Uint32(lengthBytes))
if length <= 4 || length > MaxRequestSize {
return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
}
encodedReq := make([]byte, length)
if _, err := io.ReadFull(r, encodedReq); err != nil {
return nil, err
}
req = &request{}
if err := decode(encodedReq, req); err != nil {
return nil, err
}
return req, nil
}
func allocateBody(key, version int16) requestBody {
switch key {
case 0:
return &ProduceRequest{}
case 1:
return &FetchRequest{}
case 2:
return &OffsetRequest{}
case 3:
return &MetadataRequest{}
case 8:
return &OffsetCommitRequest{Version: version}
case 9:
return &OffsetFetchRequest{}
case 10:
return &ConsumerMetadataRequest{}
}
return nil
}

View File

@@ -1,80 +0,0 @@
package sarama
import (
"bytes"
"reflect"
"testing"
)
type testRequestBody struct {
}
func (s *testRequestBody) key() int16 {
return 0x666
}
func (s *testRequestBody) version() int16 {
return 0xD2
}
func (s *testRequestBody) encode(pe packetEncoder) error {
return pe.putString("abc")
}
// Not specific to request tests: these are helper functions for testing structures that
// implement the encoder or decoder interfaces and needed somewhere to live.
func testEncodable(t *testing.T, name string, in encoder, expect []byte) {
packet, err := encode(in)
if err != nil {
t.Error(err)
} else if !bytes.Equal(packet, expect) {
t.Error("Encoding", name, "failed\ngot ", packet, "\nwant", expect)
}
}
func testDecodable(t *testing.T, name string, out decoder, in []byte) {
err := decode(in, out)
if err != nil {
t.Error("Decoding", name, "failed:", err)
}
}
func testRequest(t *testing.T, name string, rb requestBody, expected []byte) {
// Encode the request
req := &request{correlationID: 123, clientID: "foo", body: rb}
packet, err := encode(req)
headerSize := 14 + len("foo")
if err != nil {
t.Error(err)
} else if !bytes.Equal(packet[headerSize:], expected) {
t.Error("Encoding", name, "failed\ngot ", packet, "\nwant", expected)
}
// Decode the request
decoded, err := decodeRequest(bytes.NewReader(packet))
if err != nil {
t.Error("Failed to decode request", err)
} else if decoded.correlationID != 123 || decoded.clientID != "foo" {
t.Errorf("Decoded header is not valid: %v", decoded)
} else if !reflect.DeepEqual(rb, decoded.body) {
t.Errorf("Decoded request does not match the encoded one\nencoded: %v\ndecoded: %v", rb, decoded)
}
}
func testResponse(t *testing.T, name string, res encoder, expected []byte) {
encoded, err := encode(res)
if err != nil {
t.Error(err)
} else if expected != nil && !bytes.Equal(encoded, expected) {
t.Error("Encoding", name, "failed\ngot ", encoded, "\nwant", expected)
}
decoded := reflect.New(reflect.TypeOf(res).Elem()).Interface().(decoder)
if err := decode(encoded, decoded); err != nil {
t.Error("Decoding", name, "failed:", err)
}
if !reflect.DeepEqual(decoded, res) {
t.Errorf("Decoded response does not match the encoded one\nencoded: %#v\ndecoded: %#v", res, decoded)
}
}

View File

@@ -1,21 +0,0 @@
package sarama
import "fmt"
type responseHeader struct {
length int32
correlationID int32
}
func (r *responseHeader) decode(pd packetDecoder) (err error) {
r.length, err = pd.getInt32()
if err != nil {
return err
}
if r.length <= 4 || r.length > MaxResponseSize {
return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
}
r.correlationID, err = pd.getInt32()
return err
}

View File

@@ -1,21 +0,0 @@
package sarama
import "testing"
var (
responseHeaderBytes = []byte{
0x00, 0x00, 0x0f, 0x00,
0x0a, 0xbb, 0xcc, 0xff}
)
func TestResponseHeader(t *testing.T) {
header := responseHeader{}
testDecodable(t, "response header", &header, responseHeaderBytes)
if header.length != 0xf00 {
t.Error("Decoding header length failed, got", header.length)
}
if header.correlationID != 0x0abbccff {
t.Error("Decoding header correlation id failed, got", header.correlationID)
}
}

View File

@@ -1,47 +0,0 @@
/*
Package sarama provides client libraries for the Kafka 0.8 protocol. The AsyncProducer object is the high-level
API for producing messages asynchronously; the SyncProducer provides a blocking API for the same purpose.
The Consumer object is the high-level API for consuming messages. The Client object provides metadata
management functionality that is shared between the higher-level objects.
For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
and message sent on the wire.
The Request/Response objects and properties are mostly undocumented, as they line up exactly with the
protocol fields documented by Kafka at https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
*/
package sarama
import (
"io/ioutil"
"log"
)
// Logger is the instance of a StdLogger interface that Sarama writes connection
// management events to. By default it is set to discard all log messages via ioutil.Discard,
// but you can set it to redirect wherever you want.
var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
// StdLogger is used to log error messages.
type StdLogger interface {
Print(v ...interface{})
Printf(format string, v ...interface{})
Println(v ...interface{})
}
// PanicHandler is called for recovering from panics spawned internally to the library (and thus
// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
var PanicHandler func(interface{})
// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
// to process.
var MaxRequestSize int32 = 100 * 1024 * 1024
// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
// protect the client from running out of memory. Please note that brokers do not have any natural limit on
// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
// (see https://issues.apache.org/jira/browse/KAFKA-2063).
var MaxResponseSize int32 = 100 * 1024 * 1024
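
The exported variables in this file (Logger, PanicHandler, MaxRequestSize, MaxResponseSize) are the package-level knobs the doc comments describe. A minimal sketch of wiring them up from an importing application (the main wrapper and the specific size values are illustrative, not part of the deleted file):

```
package main

import (
	"log"
	"os"

	"github.com/Shopify/sarama"
)

func main() {
	// Redirect Sarama's connection-management logging from ioutil.Discard
	// to stderr so broker activity is visible while debugging.
	sarama.Logger = log.New(os.Stderr, "[Sarama] ", log.LstdFlags)

	// Optionally recover panics raised inside the library's goroutines.
	sarama.PanicHandler = func(v interface{}) {
		log.Println("recovered from sarama panic:", v)
	}

	// Tighten the request/response size limits below the 100 MiB defaults
	// (hypothetical values; pick limits that match your broker settings).
	sarama.MaxRequestSize = 10 * 1024 * 1024
	sarama.MaxResponseSize = 10 * 1024 * 1024
}
```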

View File

@@ -1,41 +0,0 @@
package sarama
import (
"bytes"
"encoding/binary"
"github.com/golang/snappy"
)
var snappyMagic = []byte{130, 83, 78, 65, 80, 80, 89, 0}
// snappyEncode encodes binary data with Snappy
func snappyEncode(src []byte) []byte {
return snappy.Encode(nil, src)
}
// snappyDecode decodes Snappy-compressed data, in either raw or xerial-framed form
func snappyDecode(src []byte) ([]byte, error) {
if bytes.Equal(src[:8], snappyMagic) {
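// The input uses the xerial/snappy-java framing: an 8-byte magic header plus
// 8 bytes of version/compatibility info (skipped via pos = 16), followed by
// big-endian length-prefixed snappy chunks that are decoded and concatenated.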
var (
pos = uint32(16)
max = uint32(len(src))
dst = make([]byte, 0, len(src))
chunk []byte
err error
)
for pos < max {
size := binary.BigEndian.Uint32(src[pos : pos+4])
pos += 4
chunk, err = snappy.Decode(chunk, src[pos:pos+size])
if err != nil {
return nil, err
}
pos += size
dst = append(dst, chunk...)
}
return dst, nil
}
return snappy.Decode(nil, src)
}

File diff suppressed because one or more lines are too long

View File

@@ -1,94 +0,0 @@
package sarama
import "sync"
// SyncProducer publishes Kafka messages. It routes messages to the correct broker, refreshing metadata as appropriate,
// and parses responses for errors. You must call Close() on a producer to avoid leaks: it may not be garbage-collected
// automatically when it passes out of scope.
type SyncProducer interface {
// SendMessage produces a given message, and returns only when it either has succeeded or failed to produce.
// It will return the partition and the offset of the produced message, or an error if the message
// failed to produce.
SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
// Close shuts down the producer and flushes any messages it may have buffered. You must call this function before
// a producer object passes out of scope, as it may otherwise leak memory. You must call this before calling Close
// on the underlying client.
Close() error
}
type syncProducer struct {
producer *asyncProducer
wg sync.WaitGroup
}
// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
p, err := NewAsyncProducer(addrs, config)
if err != nil {
return nil, err
}
return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
}
// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this producer.
func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
p, err := NewAsyncProducerFromClient(client)
if err != nil {
return nil, err
}
return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
}
func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
p.conf.Producer.Return.Successes = true
p.conf.Producer.Return.Errors = true
sp := &syncProducer{producer: p}
sp.wg.Add(2)
go withRecover(sp.handleSuccesses)
go withRecover(sp.handleErrors)
return sp
}
func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
oldMetadata := msg.Metadata
defer func() {
msg.Metadata = oldMetadata
}()
expectation := make(chan error, 1)
msg.Metadata = expectation
sp.producer.Input() <- msg
if err := <-expectation; err != nil {
return -1, -1, err
} else {
return msg.Partition, msg.Offset, nil
}
}
func (sp *syncProducer) handleSuccesses() {
defer sp.wg.Done()
for msg := range sp.producer.Successes() {
expectation := msg.Metadata.(chan error)
expectation <- nil
}
}
func (sp *syncProducer) handleErrors() {
defer sp.wg.Done()
for err := range sp.producer.Errors() {
expectation := err.Msg.Metadata.(chan error)
expectation <- err.Err
}
}
func (sp *syncProducer) Close() error {
sp.producer.AsyncClose()
sp.wg.Wait()
return nil
}
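
The Close ordering described in the comments above (producer first, then any shared client) is easy to get wrong. A minimal sketch using NewSyncProducerFromClient, assuming a reachable broker at localhost:9092 and a placeholder topic; only the shutdown ordering is the point here:

```
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatalln(err)
	}
	// Deferred first, so it runs last: the client must outlive the producer.
	defer client.Close()

	producer, err := sarama.NewSyncProducerFromClient(client)
	if err != nil {
		log.Fatalln(err)
	}
	// Deferred second, so it runs before client.Close (LIFO order).
	defer producer.Close()

	msg := &sarama.ProducerMessage{Topic: "my_topic", Value: sarama.StringEncoder("hello")}
	if partition, offset, err := producer.SendMessage(msg); err != nil {
		log.Println("send failed:", err)
	} else {
		log.Printf("sent to partition %d at offset %d", partition, offset)
	}
}
```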

View File

@@ -1,149 +0,0 @@
package sarama
import (
"log"
"sync"
"testing"
)
func TestSyncProducer(t *testing.T) {
seedBroker := newMockBroker(t, 1)
leader := newMockBroker(t, 2)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
for i := 0; i < 10; i++ {
leader.Returns(prodSuccess)
}
producer, err := NewSyncProducer([]string{seedBroker.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
msg := &ProducerMessage{
Topic: "my_topic",
Value: StringEncoder(TestMessage),
Metadata: "test",
}
partition, offset, err := producer.SendMessage(msg)
if partition != 0 || msg.Partition != partition {
t.Error("Unexpected partition")
}
if offset != 0 || msg.Offset != offset {
t.Error("Unexpected offset")
}
if str, ok := msg.Metadata.(string); !ok || str != "test" {
t.Error("Unexpected metadata")
}
if err != nil {
t.Error(err)
}
}
safeClose(t, producer)
leader.Close()
seedBroker.Close()
}
func TestConcurrentSyncProducer(t *testing.T) {
seedBroker := newMockBroker(t, 1)
leader := newMockBroker(t, 2)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader.Returns(prodSuccess)
config := NewConfig()
config.Producer.Flush.Messages = 100
producer, err := NewSyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
wg := sync.WaitGroup{}
for i := 0; i < 100; i++ {
wg.Add(1)
go func() {
msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder(TestMessage)}
partition, _, err := producer.SendMessage(msg)
if partition != 0 {
t.Error("Unexpected partition")
}
if err != nil {
t.Error(err)
}
wg.Done()
}()
}
wg.Wait()
safeClose(t, producer)
leader.Close()
seedBroker.Close()
}
func TestSyncProducerToNonExistingTopic(t *testing.T) {
broker := newMockBroker(t, 1)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(broker.Addr(), broker.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, broker.BrokerID(), nil, nil, ErrNoError)
broker.Returns(metadataResponse)
config := NewConfig()
config.Metadata.Retry.Max = 0
config.Producer.Retry.Max = 0
producer, err := NewSyncProducer([]string{broker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
metadataResponse = new(MetadataResponse)
metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition)
broker.Returns(metadataResponse)
_, _, err = producer.SendMessage(&ProducerMessage{Topic: "unknown"})
if err != ErrUnknownTopicOrPartition {
t.Error("Uxpected ErrUnknownTopicOrPartition, found:", err)
}
safeClose(t, producer)
broker.Close()
}
// This example shows the basic usage pattern of the SyncProducer.
func ExampleSyncProducer() {
producer, err := NewSyncProducer([]string{"localhost:9092"}, nil)
if err != nil {
log.Fatalln(err)
}
defer func() {
if err := producer.Close(); err != nil {
log.Fatalln(err)
}
}()
msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")}
partition, offset, err := producer.SendMessage(msg)
if err != nil {
log.Printf("FAILED to send message: %s\n", err)
} else {
log.Printf("> message sent to partition %d at offset %d\n", partition, offset)
}
}

View File

@@ -1,10 +0,0 @@
# Sarama tools
This folder contains applications that are useful for exploring or instrumenting your Kafka cluster.
Some of these tools mirror tools that ship with Kafka, but these won't require installing the JVM to function.
- [kafka-console-producer](./kafka-console-producer): a command line tool to produce a single message to your Kafka cluster.
- [kafka-console-partitionconsumer](./kafka-console-partitionconsumer): (deprecated) a command line tool to consume a single partition of a topic on your Kafka cluster.
- [kafka-console-consumer](./kafka-console-consumer): a command line tool to consume arbitrary partitions of a topic on your Kafka cluster.
To install all tools, run `go get github.com/Shopify/sarama/tools/...`

View File

@@ -1,2 +0,0 @@
kafka-console-consumer
kafka-console-consumer.test

View File

@@ -1,29 +0,0 @@
# kafka-console-consumer
A simple command line tool to consume partitions of a topic and print the
messages to standard output.
### Installation
go get github.com/Shopify/sarama/tools/kafka-console-consumer
### Usage
# Minimum invocation
kafka-console-consumer -topic=test -brokers=kafka1:9092
# It will pick up a KAFKA_PEERS environment variable
export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
kafka-console-consumer -topic=test
# You can specify the offset you want to start at. It can be either
# `oldest` or `newest`. The default is `newest`.
kafka-console-consumer -topic=test -offset=oldest
kafka-console-consumer -topic=test -offset=newest
# You can specify the partition(s) you want to consume as a comma-separated
# list. The default is `all`.
kafka-console-consumer -topic=test -partitions=1,2,3
# Display all command line options
kafka-console-consumer -help

View File

@@ -1,145 +0,0 @@
package main
import (
"flag"
"fmt"
"log"
"os"
"os/signal"
"strconv"
"strings"
"sync"
"github.com/Shopify/sarama"
)
var (
brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster")
topic = flag.String("topic", "", "REQUIRED: the topic to consume")
partitions = flag.String("partitions", "all", "The partitions to consume, can be 'all' or comma-separated numbers")
offset = flag.String("offset", "newest", "The offset to start with. Can be `oldest` or `newest`")
verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging")
bufferSize = flag.Int("buffer-size", 256, "The buffer size of the message channel.")
logger = log.New(os.Stderr, "", log.LstdFlags)
)
func main() {
flag.Parse()
if *brokerList == "" {
printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.")
}
if *topic == "" {
printUsageErrorAndExit("-topic is required")
}
if *verbose {
sarama.Logger = logger
}
var initialOffset int64
switch *offset {
case "oldest":
initialOffset = sarama.OffsetOldest
case "newest":
initialOffset = sarama.OffsetNewest
default:
printUsageErrorAndExit("-offset should be `oldest` or `newest`")
}
c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil)
if err != nil {
printErrorAndExit(69, "Failed to start consumer: %s", err)
}
partitionList, err := getPartitions(c)
if err != nil {
printErrorAndExit(69, "Failed to get the list of partitions: %s", err)
}
var (
messages = make(chan *sarama.ConsumerMessage, *bufferSize)
closing = make(chan struct{})
wg sync.WaitGroup
)
go func() {
signals := make(chan os.Signal, 1)
signal.Notify(signals, os.Kill, os.Interrupt)
<-signals
logger.Println("Initiating shutdown of consumer...")
close(closing)
}()
for _, partition := range partitionList {
pc, err := c.ConsumePartition(*topic, partition, initialOffset)
if err != nil {
printErrorAndExit(69, "Failed to start consumer for partition %d: %s", partition, err)
}
go func(pc sarama.PartitionConsumer) {
<-closing
pc.AsyncClose()
}(pc)
wg.Add(1)
go func(pc sarama.PartitionConsumer) {
defer wg.Done()
for message := range pc.Messages() {
messages <- message
}
}(pc)
}
go func() {
for msg := range messages {
fmt.Printf("Partition:\t%d\n", msg.Partition)
fmt.Printf("Offset:\t%d\n", msg.Offset)
fmt.Printf("Key:\t%s\n", string(msg.Key))
fmt.Printf("Value:\t%s\n", string(msg.Value))
fmt.Println()
}
}()
wg.Wait()
logger.Println("Done consuming topic", *topic)
close(messages)
if err := c.Close(); err != nil {
logger.Println("Failed to close consumer: ", err)
}
}
func getPartitions(c sarama.Consumer) ([]int32, error) {
if *partitions == "all" {
return c.Partitions(*topic)
}
tmp := strings.Split(*partitions, ",")
var pList []int32
for i := range tmp {
val, err := strconv.ParseInt(tmp[i], 10, 32)
if err != nil {
return nil, err
}
pList = append(pList, int32(val))
}
return pList, nil
}
func printErrorAndExit(code int, format string, values ...interface{}) {
fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
fmt.Fprintln(os.Stderr)
os.Exit(code)
}
func printUsageErrorAndExit(format string, values ...interface{}) {
fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
fmt.Fprintln(os.Stderr)
fmt.Fprintln(os.Stderr, "Available command line options:")
flag.PrintDefaults()
os.Exit(64)
}

Some files were not shown because too many files have changed in this diff.