Compare commits

1080 Commits

Author SHA1 Message Date
Gunnar Aasen
40c37aadc6 Refactor to use AggregatingOutput 2018-05-07 08:21:00 -07:00
Gunnar Aasen
93a579d7e4 Output: Azure Monitor: Cleanup and add README 2018-05-07 08:20:49 -07:00
Gunnar Aasen
5c4c3a1ca9 Output: Azure Monitor: Initial aggregated metric implementation 2018-05-02 09:29:58 -07:00
Mark Simms
02d86b1b6e Starting on azure monitor metrics integration with MSI auth 2018-05-02 09:29:58 -07:00
Daniel Nelson
4f61d2a09c Add idea for an output that aggregates before adding to metric buffer 2018-05-01 16:47:16 -07:00
Daniel Nelson
c03e8918a2 Update changelog, add mcrouter to README 2018-05-01 12:01:08 -07:00
Craig Thayer
83345ec2b3 Add input plugin for McRouter (#4077) 2018-05-01 11:58:15 -07:00
Daniel Nelson
f094f83da5 Update changelog 2018-04-30 19:21:12 -07:00
Mariusz Brzeski
0768022240 Support busybox ping in the ping input (#3877) 2018-04-30 19:20:13 -07:00
Daniel Nelson
92956104d6 Update changelog 2018-04-30 17:51:04 -07:00
Daniel Nelson
964856eb5f Fix win_perf_counters to collect counters per instance (#4036) 2018-04-30 17:48:45 -07:00
Daniel Nelson
377547aa4c Document one field per line requirement in logparser 2018-04-30 16:15:51 -07:00
Grégoire Bellon-Gervais
1662b6feb9 Metric values have the same names as in the old cassandra plugin (#4080) 2018-04-27 15:12:59 -07:00
Daniel Nelson
908170b207 Update changelog 2018-04-27 14:56:31 -07:00
Vincent Caron
ec47cab950 Use same timestamp for fields in system input (#4078) 2018-04-27 14:55:10 -07:00
Daniel Nelson
06671777e9 Update changelog 2018-04-25 19:02:00 -07:00
Adrián López
46a8bdbfe5 Add parameter to force the interval of gather for sysstat (#4068) 2018-04-25 18:59:42 -07:00
Daniel Nelson
abdff033cc Note options that only work with influxdb HTTP 2018-04-25 13:47:16 -07:00
Daniel Nelson
535e9e9a68 Update changelog 2018-04-25 13:47:16 -07:00
Jack Zampolin
c256f17870 Fix timeout parsing error in nvidia_smi (#4070) 2018-04-24 14:40:19 -07:00
Yosuke Hara
b8d5df2076 Add support for LeoFS v1.4 to leofs input (#4044) 2018-04-24 14:14:31 -07:00
Daniel Nelson
538baee8a4 Fix nightly build 2018-04-24 13:42:42 -07:00
Daniel Nelson
d3d8d52e2f Fix links to jolokia example configs 2018-04-24 12:46:40 -07:00
Daniel Nelson
286f14f730 Update changelog 2018-04-23 15:15:08 -07:00
Daniel Nelson
9f4752ba12 Add docker input server version (#4035) 2018-04-23 15:09:04 -07:00
Daniel Nelson
f639f994b5 Ignore writer error in file output (#4055) 2018-04-23 15:08:04 -07:00
Daniel Nelson
911f0e4b57 Deprecate the cassandra input plugin (#4050) 2018-04-23 15:06:26 -07:00
Daniel Nelson
86a3b8cad4 Update changelog 2018-04-23 14:01:38 -07:00
Daniel Nelson
a3500cc33a Fix handling of floats with multiple leading zeroes (#4065) 2018-04-23 13:29:49 -07:00
Daniel Nelson
bf0c59f56c Return errors in mongodb SSL/TLS configuration (#4066) 2018-04-23 13:29:12 -07:00
Fred Cox
c7b3667ac4 Add server argument as first argument in unbound input (#4062) 2018-04-23 13:27:29 -07:00
Daniel Nelson
638853be05 Update changelog 2018-04-20 18:49:55 -07:00
Daniel Nelson
ee9a2f73a1 Fix duplicate tags when overriding tag (#4056) 2018-04-20 18:39:31 -07:00
Daniel Nelson
648d7ae922 Run 32-bit tests in CircleCI 2018-04-20 15:10:22 -07:00
Daniel Nelson
13937d511d Update changelog 2018-04-20 15:05:39 -07:00
Daniel Nelson
fe4d3cd117 Fix ints being capped at 32-bits on 32-bit archs (#4054) 2018-04-20 14:56:28 -07:00
Leandro Piccilli
eacf11fcd8 Update gopsutils to include fixes for #4037 and #3750 (#4045) 2018-04-20 14:32:19 -07:00
Daniel Nelson
3a8ca4d08d Update changelog 2018-04-19 16:58:59 -07:00
Daniel Nelson
00e3363d45 Add only valid field types in cassandra input (#4048) 2018-04-19 16:56:46 -07:00
Daniel Nelson
29b37e67c2 Allow metrics to be unserializable in influx.Reader (#4047)
Metrics that are unserializable will be logged at debug level, but the
rest of the batch will be sent. Unserializable metrics can occur during
normal operation, for example if you remove all fields from a metric or
the metric cannot fit within the line size limit.
2018-04-19 16:24:31 -07:00
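The behavior described above is easy to sketch. Assuming hypothetical `metric` and `serialize` stand-ins (not the actual influx.Reader code), the idea is to log the bad metric at debug level and keep serializing the rest of the batch:

```go
package main

import (
	"fmt"
	"log"
)

// metric is a hypothetical stand-in for telegraf.Metric.
type metric struct {
	name   string
	fields map[string]interface{}
}

// serialize is a hypothetical serializer; it fails when a metric has no
// fields, one of the ways a metric can become unserializable.
func serialize(m metric) ([]byte, error) {
	if len(m.fields) == 0 {
		return nil, fmt.Errorf("no serializable fields in %q", m.name)
	}
	return []byte(m.name), nil
}

func main() {
	batch := []metric{
		{name: "cpu", fields: map[string]interface{}{"usage": 42.0}},
		{name: "empty", fields: map[string]interface{}{}}, // unserializable
		{name: "mem", fields: map[string]interface{}{"used": 1024}},
	}

	var out [][]byte
	for _, m := range batch {
		b, err := serialize(m)
		if err != nil {
			// Log at debug level ("D!" is the Telegraf log prefix
			// convention) and keep going; the rest of the batch is
			// still sent.
			log.Printf("D! %v", err)
			continue
		}
		out = append(out, b)
	}
	fmt.Printf("sending %d of %d metrics\n", len(out), len(batch))
}
```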
Daniel Nelson
42fee824f8 Update changelog 2018-04-18 16:57:15 -07:00
Daniel Nelson
120be7e87b Report available fields if utmp is unreadable (#4043) 2018-04-18 16:55:18 -07:00
Daniel Nelson
9e4a330ee5 Update github.com/gorilla/mux version (#4042) 2018-04-18 16:55:02 -07:00
Daniel Nelson
78d4a95ce6 Test using Go 1.8-1.10; official builds with 1.10 (#4041) 2018-04-18 16:14:06 -07:00
Daniel Nelson
571ce86d10 Update changelog 2018-04-18 12:14:58 -07:00
Daniel Nelson
dd2c60e620 Fix graphite serialization of unsigned ints (#4033) 2018-04-18 12:13:25 -07:00
Daniel Nelson
1486ae25c0 Tidy up last change to socket listener/writer 2018-04-17 17:48:30 -07:00
Daniel Nelson
da5b46e770 Update changelog 2018-04-17 17:36:35 -07:00
Matt
9ef902f4a1 Add snmp input option to strip non-fixed-length index suffixes (#4025) 2018-04-17 17:34:39 -07:00
Daniel Nelson
058510464c Update changelog 2018-04-17 17:03:18 -07:00
Bob Shannon
0b4f4b089f Add TLS support to socket_writer and socket_listener plugins (#4021) 2018-04-17 17:02:04 -07:00
Daniel Nelson
7c592558d8 Update changelog 2018-04-17 15:45:49 -07:00
James Maidment
1e1d9e8acb Update mem values to gauge (#4034) 2018-04-17 15:43:10 -07:00
Daniel Nelson
3b3d16273d Update changelog adding nvidia_smi 2018-04-17 13:43:36 -07:00
Jack Zampolin
3046f957d5 Add nvidia_smi input to monitor nvidia GPUs (#4026) 2018-04-17 13:40:55 -07:00
Daniel Nelson
bcf1cf59c1 Fix docs about outputs and fieldpass/fielddrop
This has been allowed since 1.1.0
2018-04-17 13:35:27 -07:00
Daniel Nelson
c8d2ba2bc8 Remove RateLimiter tests due to race conditions
These tests are fundamentally racy, so they are removed to improve the
reliability of the test cases.
2018-04-16 18:52:52 -07:00
Daniel Nelson
04ab9a4fe4 Set 1.6 release date in changelog 2018-04-16 12:04:31 -07:00
Daniel Nelson
e4009234e9 Fix HashID conflicts in pathological cases
Use "\n" as the delimiter, since it cannot occur in the series name.
2018-04-12 18:09:31 -07:00
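A pathological case is easy to construct: without a delimiter, distinct name/tag combinations can concatenate to the same hash input. A sketch of the idea, assuming an fnv-64a hash (not necessarily Telegraf's exact HashID code):

```go
package main

import (
	"fmt"
	"hash/fnv"
	"sort"
)

// hashID joins the measurement name and sorted tag key/value pairs with
// "\n", which cannot occur in a series name, so inputs like
// "cpu"+"host"+"a" and "cpu"+"ho"+"sta" no longer collide.
func hashID(name string, tags map[string]string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(name))
	keys := make([]string, 0, len(tags))
	for k := range tags {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		h.Write([]byte("\n" + k + "\n" + tags[k]))
	}
	return h.Sum64()
}

func main() {
	fmt.Println(hashID("cpu", map[string]string{"host": "a"}))
	fmt.Println(hashID("cpu", map[string]string{"ho": "sta"})) // distinct now
}
```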
Daniel Nelson
8d516d26e9 Fix MQTT sample config 2018-04-12 14:34:55 -07:00
Daniel Nelson
0a02363c03 Update changelog 2018-04-11 16:52:40 -07:00
jvassev
2c19d74829 Prevent loading config twice in K8S (#3999)
When the config dir is mounted from a configmap, filepath.Walk() finds the
same .conf file twice because, for example, 20-acme.conf is a link to
..data/20-acme.conf.

This patch skips all folder names starting with '..', a prefix that is
pretty uncommon and mainly used by Kubernetes mounts.
2018-04-11 16:51:19 -07:00
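A minimal sketch of the approach, assuming a hypothetical config directory path (the real loader walks whatever directory it was given):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	// Walk the config dir, skipping Kubernetes configmap bookkeeping
	// directories such as "..data" so each .conf file is seen once.
	root := "/etc/telegraf/telegraf.d" // hypothetical path
	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() && strings.HasPrefix(info.Name(), "..") {
			return filepath.SkipDir
		}
		if !info.IsDir() && strings.HasSuffix(info.Name(), ".conf") {
			fmt.Println("loading", path)
		}
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```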
Daniel Nelson
3f4e1af222 Add --console and --service to usage message in Windows (#3993) 2018-04-11 16:44:55 -07:00
Daniel Nelson
10c7324d74 Update changelog 2018-04-10 18:18:27 -07:00
Daniel Nelson
55cfc383f3 Allow grok pattern to contain newlines (#4005) 2018-04-10 18:16:21 -07:00
Daniel Nelson
7b8f12b377 Update changelog 2018-04-10 18:15:02 -07:00
Daniel Nelson
15f19375e7 Typesetting changes to fibaro README 2018-04-10 18:14:27 -07:00
Pierrick Brossin
93e2381f42 Add Fibaro input plugin (#2741) 2018-04-10 18:04:58 -07:00
Daniel Nelson
387bae9b9f Fix host ordering in mongodb unit tests 2018-04-10 17:24:40 -07:00
Daniel Nelson
34416e0da8 Updated changelog 2018-04-10 17:11:25 -07:00
Jake Champlin
32f56140a3 Add per-host shard metrics in mongodb input (#3819) 2018-04-10 17:10:29 -07:00
Boris Schrijver
64a23c0b18 Fix make test-ci run (#4002) 2018-04-10 15:35:58 -07:00
Daniel Nelson
af68975e2f Document that InfluxDB input metrics vary with version 2018-04-09 19:30:18 -07:00
Daniel Nelson
0223b22b3e Update changelog 2018-04-09 17:06:34 -07:00
Daniel Nelson
1890efbb70 Rename repl_oplog_window_s to repl_oplog_window_sec
To match existing metric style.
2018-04-09 17:05:45 -07:00
Daniel Nelson
e4f8a82ee6 Fix newline escaping in line protocol (#3992) 2018-04-09 15:29:52 -07:00
Daniel Nelson
a28de4b5cd Update changelog 2018-04-06 16:45:07 -07:00
Daniel Nelson
caac224276 Add details about MongoDB permissions 2018-04-06 16:43:03 -07:00
Daniel Nelson
fe31ce9d7d Modernize mongodb docs 2018-04-06 16:36:03 -07:00
Matvey Kruglov
01ede2ea0b Add repl_oplog_window_s metric to mongodb input (#3964) 2018-04-06 16:34:47 -07:00
alekseyp
fb6390e7ab Fix typo in phpfpm README (#3985) 2018-04-06 16:20:36 -07:00
Mark Wilkinson - m82labs
ff40da6019 Use explicit casts to avoid datatype issues (#3980) 2018-04-06 14:58:33 -07:00
Daniel Nelson
43a044542e Update changelog 2018-04-06 13:19:02 -07:00
Daniel Nelson
00203fa889 Export all vars defined in /etc/default/telegraf (#3981)
This keeps the format of this file the same between systemd and
sysvinit.
2018-04-06 13:17:24 -07:00
Daniel Nelson
7177e0473f Fix conversion of unsigned ints in prometheus output (#3978) 2018-04-05 16:38:41 -07:00
Daniel Nelson
252101b7c6 Update changelog 2018-04-05 11:19:01 -07:00
Daniel Nelson
efdf36746c Update gosnmp revision (#3973) 2018-04-05 11:15:20 -07:00
Daniel Nelson
df78133bf3 Log error if scheme is unsupported 2018-04-05 11:08:31 -07:00
Jeff Ashton
bf915fa79c Fix https in InfluxDB output (#3976) 2018-04-05 10:50:32 -07:00
Daniel Nelson
c160b56229 Fix build.py next_version 2018-04-04 21:53:20 -07:00
Daniel Nelson
627f0e5d9d Use automatic extension naming when running go build 2018-04-04 19:00:28 -07:00
Daniel Nelson
4551b4c5d2 Enable ntpq tests on Windows (#3972) 2018-04-04 18:35:05 -07:00
Daniel Nelson
a9afd2f030 Add config-directory documentation for Windows service 2018-04-04 16:30:22 -07:00
Daniel Nelson
caf860bc88 Don't print name of plugin or interval size during --test 2018-04-04 16:30:22 -07:00
Daniel Nelson
beeab2c509 Sort field names when running --test 2018-04-04 16:30:22 -07:00
Scott Anderson
a50acadc44 Add details about why not all logstash patterns are supported (#3971) 2018-04-04 14:42:58 -07:00
Daniel Nelson
265d0e6d84 Fix bug preventing database from being recreated (#3962) 2018-04-02 16:18:33 -07:00
Daniel Nelson
413cf6dd23 Set next version to 1.7 on master 2018-04-02 14:44:09 -07:00
Daniel Nelson
7b23287e20 Update sample telegraf.conf 2018-04-02 14:40:51 -07:00
Daniel Nelson
f4c0aac898 Update changelog 2018-04-02 14:34:25 -07:00
Daniel Nelson
bcaaeda49c Fix precision truncation when no timestamp included (#3961) 2018-04-02 14:32:33 -07:00
Daniel Nelson
9d2f3fcbb9 Update changelog 2018-04-02 14:31:36 -07:00
Daniel Nelson
0aad487cab Fix go vet and use go test -race 2018-04-02 14:30:46 -07:00
Daniel Nelson
19c102cf4b Fix parsing of dos line endings in smart input (#3960) 2018-04-02 13:55:10 -07:00
Daniel Nelson
109c1a4344 Update gopsutil version to v2.18.03 2018-04-02 13:54:16 -07:00
Daniel Nelson
82448a9dd1 Add metric_version option to mysql input (#3954) 2018-04-02 13:10:43 -07:00
Daniel Nelson
64b239663c Add mutex to influx parser 2018-04-02 12:52:23 -07:00
Daniel Nelson
7e3ec16e15 Allow empty string field values 2018-03-30 16:57:35 -07:00
Mark Wilkinson - m82labs
a971ffb880 Remove host tag from Database IO v2 Query (#3953) 2018-03-30 13:22:24 -07:00
Daniel Nelson
461c0dccd8 Update changelog 2018-03-30 13:20:07 -07:00
Daniel Nelson
971debb582 Add filters for container state to docker input (#3950) 2018-03-30 13:17:48 -07:00
Daniel Nelson
6d585beedf Fix http_proxy variable name in http_response plugin 2018-03-30 11:11:12 -07:00
Daniel Nelson
38ec968b0b Move Handler interface into machine where it is used 2018-03-30 11:11:12 -07:00
Daniel Nelson
0c1293ad5e Add MmapStats when using rocksdb storage engine (#3930) 2018-03-29 13:32:05 -07:00
Daniel Nelson
b99cd14129 Add influx uint support as a runtime option (#3948) 2018-03-29 13:31:43 -07:00
Daniel Nelson
c2108fcf09 Update changelog 2018-03-29 12:44:33 -07:00
Daniel Nelson
04b9afff68 Add TLS support to zookeeper input (#3949) 2018-03-29 12:42:25 -07:00
Matt
a320f91516 Add line protocol uint64 support (#3946) 2018-03-28 16:43:25 -07:00
Daniel Nelson
ef112e6ee7 Revert to 'f' formatting for floats in line protocol
Using 'g' with -1 precision switches over to scientific notation for
numbers that are too small, and setting a larger precision results in
larger-than-desired representations.
2018-03-28 14:38:39 -07:00
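The difference is visible directly with strconv.FormatFloat (a toy demonstration, not necessarily the exact call site):

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	v := 0.0000042

	// 'g' with -1 precision flips to scientific notation once the
	// exponent drops below -4.
	fmt.Println(strconv.FormatFloat(v, 'g', -1, 64)) // 4.2e-06

	// 'f' keeps plain decimal notation.
	fmt.Println(strconv.FormatFloat(v, 'f', -1, 64)) // 0.0000042
}
```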
rabhis
5be1198274 Reconnect AMQP consumer to broker (#3947) 2018-03-28 14:00:56 -07:00
Daniel Nelson
8a73dc05c0 Update changelog 2018-03-27 18:38:39 -07:00
Daniel Nelson
43bd23e555 Add support for connecting to InfluxDB over a unix domain socket (#3942) 2018-03-27 18:36:08 -07:00
Daniel Nelson
b0b18df0bf Update changelog 2018-03-27 18:07:37 -07:00
Daniel Nelson
cc97b48ca8 Add support for skipping database creation (#3941) 2018-03-27 17:59:57 -07:00
Daniel Nelson
36b8220181 Update changelog 2018-03-27 17:44:13 -07:00
Daniel Nelson
1c0f63a90d Add new line protocol parser and serializer, influxdb output (#3924) 2018-03-27 17:30:51 -07:00
Daniel Nelson
503881d4d7 Update to latest fsnotify release 2018-03-27 17:21:00 -07:00
Daniel Nelson
63de4ffc51 Update to latest gopsutil release 2018-03-27 15:12:57 -07:00
Daniel Nelson
4cefe3eadd Add documentation for existing TLS settings in consul input (#3931) 2018-03-26 19:22:17 -07:00
Daniel Nelson
b63073deb2 Update DC/OS guidance for cardinality 2018-03-26 19:20:27 -07:00
Daniel Nelson
e60abdf8ea Update passenger input documentation (#3938) 2018-03-26 19:11:08 -07:00
Daniel Nelson
e5e75a62cc Update changelog 2018-03-23 19:17:03 -07:00
Daniel Nelson
a4870e6a6d Fix DC/OS URL creation race (#3932) 2018-03-23 19:14:07 -07:00
Daniel Nelson
3469e74dd9 Update changelog 2018-03-23 11:57:42 -07:00
Evan Kinney
def76ace3b Add HTTP basic auth support to the http_listener input (#3496) 2018-03-23 11:56:49 -07:00
Daniel Nelson
05393da939 Update changelog 2018-03-23 11:54:06 -07:00
Daniel Nelson
e8fc3ca70c Add TLS support to kapacitor input (#3927) 2018-03-23 11:53:18 -07:00
Daniel Nelson
729388f4dd Update changelog 2018-03-23 11:52:00 -07:00
Daniel Nelson
be9d4f4be0 Add tag for target url to phpfpm input (#3928) 2018-03-23 11:50:52 -07:00
Daniel Nelson
3658ac8f53 Skip hanging test on darwin 2018-03-22 14:41:37 -07:00
Mark Wilkinson - m82labs
d7f279e3d3 Fix SQL Server 2008 compatibility (#3916)
* Fixed a bug in the performance counter query when run against SQL Server 2016 SP1-CU2. The performance counter DMV contains duplicate entries which are not handled by the query.

* Adding more stats related to workload groups.

* Adding new RG stats, removing "host" tag

* Removed workload group query

* Fixing some 2008 compat issues, removed the host field from the result set.

* Adding fixes for SQL Server 2008 compat around RG columns. Also converted perf counter query to support named instances.
2018-03-22 10:38:40 -07:00
Daniel Nelson
e28f422d21 Update fsnotify path in license of dependencies 2018-03-21 23:06:14 -07:00
Daniel Nelson
cd919066d5 Use copy of fsnotify from tail vendor 2018-03-21 10:23:28 -07:00
Pierre Tessier
6200683c29 Remove noisy debug message from Wavefront output (#3899) 2018-03-19 11:04:35 -07:00
Daniel Nelson
76ce71f7fa Fix breaker stat name in elasticsearch comment 2018-03-16 12:58:39 -07:00
Daniel Nelson
2160779126 Set 1.5.3 release date 2018-03-14 16:32:18 -07:00
Daniel Nelson
6e5e2f713d Update changelog 2018-03-14 12:10:16 -07:00
Jonas Hahnfeld
8e515688eb Add output of stderr in case of error to exec log message (#3862)
If the command failed with a non-zero exit status, there might be an
error message on stderr. Append the first line to the error message to
ease the search for its cause.
2018-03-14 12:09:01 -07:00
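A minimal sketch of the idea with os/exec (hypothetical command; not the plugin's exact code):

```go
package main

import (
	"bytes"
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	cmd := exec.Command("sh", "-c", "echo 'boom: config missing' >&2; exit 1")
	var stderr bytes.Buffer
	cmd.Stderr = &stderr

	if err := cmd.Run(); err != nil {
		// Append the first line of stderr to the error to ease the
		// search for the cause.
		first := stderr.String()
		if i := strings.IndexByte(first, '\n'); i >= 0 {
			first = first[:i]
		}
		fmt.Printf("exec failed: %v: %s\n", err, first)
	}
}
```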
Daniel Nelson
6d6631382c Remove gdm -parallel false 2018-03-14 11:36:03 -07:00
Daniel Nelson
f1b681cbdc Use previous image on appveyor 2018-03-14 11:02:33 -07:00
Daniel Nelson
4118ec7629 Update changelog 2018-03-13 21:09:54 -07:00
Chris Ottinger
f114f6a124 Added config flag to skip collection of network protocol metrics (#3880) 2018-03-13 21:08:21 -07:00
Daniel Nelson
8cfd001441 Disable parallel restore in gdm
May be aggravating the timeout issue on appveyor builds.
2018-03-13 20:44:51 -07:00
Daniel Nelson
9ce70aad77 Use Go 1.9.4 for builds 2018-03-09 14:37:26 -08:00
Patrick Hemmer
07dbbb27dc Fix socket_listener setting ReadBufferSize on TCP sockets (#3874) 2018-03-09 09:44:35 -08:00
Daniel Nelson
0e14e31b0a Update changelog 2018-03-08 15:16:46 -08:00
Daniel Nelson
8b3767fd6e Update http_response documentation 2018-03-08 15:13:26 -08:00
Germán Jaber
81a93fcddf Add result related tags and fields to http_response (#3814) 2018-03-08 14:55:59 -08:00
Margarita Bliznikova
8005883de8 Fix intermittent TestTailBadLine failures (#3869) 2018-03-08 13:03:48 -08:00
Daniel Nelson
f7207f514e Update changelog 2018-03-08 10:54:03 -08:00
Dennis Schön
f1c8abd68c Fix uptime metric in passenger input plugin (#3871) 2018-03-08 10:52:58 -08:00
Daniel Nelson
e4ce057885 Update changelog 2018-03-07 14:17:11 -08:00
dilshatm
a6d366fb84 Fix collation difference in sqlserver input (#3786) 2018-03-07 14:16:17 -08:00
Daniel Nelson
de22480e7d Update example config 2018-03-07 13:49:04 -08:00
Daniel Nelson
2b65915b96 Update docs for addition of override processor 2018-03-07 13:47:54 -08:00
Karsten Schnitter
9d8b1b1e87 Add override processor (#3773)
This plugin can perform the standard metric modifications using override semantics.
2018-03-07 13:27:43 -08:00
Daniel Nelson
b9ddbbd5ed Update changelog 2018-03-06 13:12:16 -08:00
Margarita Bliznikova
c377c8fb7c Add host to ping timeout log message (#3853) 2018-03-06 13:10:44 -08:00
Daniel Nelson
45c22e42da Update changelog 2018-03-06 12:12:26 -08:00
Marcel
ad5e954047 Add ability to override proxy from environment in http response (#3626) 2018-03-06 12:11:38 -08:00
Demian Dekoninck
93b2870b28 Add link to ServiceInput interface in contributing guide (#3828) 2018-03-06 10:14:14 -08:00
Patrick O'Keeffe
3501b65f7c Improve fail2ban use_sudo docs (#3852) 2018-03-05 16:32:45 -08:00
Jiri Tyr
35378ae9cc Fixing error in snmp example config (#3855) 2018-03-05 15:29:26 -08:00
Daniel Nelson
1212b2ddc5 Update changelog 2018-03-05 15:28:09 -08:00
Carl Pacey
0a37386c5e Add sum stat to basicstats aggregator (#3797) 2018-03-05 15:26:31 -08:00
Daniel Nelson
00a52a67b9 Update changelog 2018-03-05 11:42:01 -08:00
Michael Boudreau
dc96c34e2c Add Solr 3 compatibility (#3799) 2018-03-05 11:41:10 -08:00
Daniel Nelson
5928219454 Update changelog 2018-02-25 01:06:44 -08:00
Daniel Nelson
8c932abff6 Disable keepalive in mqtt output. (#3779)
This functionality currently has race conditions that can result in the
output deadlocking.
2018-02-25 01:04:04 -08:00
Daniel Nelson
fcd6d4eb09 Update changelog 2018-02-20 17:33:25 -08:00
Pranay Kanwar
b355536b20 Convert boolean metric values to float in datadog output (#3804) 2018-02-20 17:32:18 -08:00
Daniel Nelson
e988c83068 Update changelog 2018-02-20 16:07:10 -08:00
Fred Cox
80d9417315 Add server option to unbound plugin (#3713) 2018-02-20 16:06:13 -08:00
Daniel Nelson
f4fa05530a Update changelog 2018-02-20 13:56:44 -08:00
Jake Champlin
18aef35c58 Add shard server stats to the mongodb input plugin (#3808) 2018-02-20 13:55:56 -08:00
Daniel Nelson
8147d60973 Fix metric buffer limit in internal plugin after reload 2018-02-19 20:55:28 -08:00
Daniel Nelson
df80fa6099 Update changelog 2018-02-16 14:08:11 -08:00
S
53221d87eb Add option to disable labels in prometheus output for string fields (#3765) 2018-02-16 14:07:26 -08:00
Dwight Spencer
ddde8809f4 Add README.md for mqtt output (#3764) 2018-02-16 13:51:20 -08:00
Daniel Nelson
0ca3900abe Mention new dropwizard parser more prominently. 2018-02-15 20:33:57 -08:00
Daniel Nelson
a777ce9293 Update changelog 2018-02-15 20:11:52 -08:00
everpcpc
3242f97deb Support deadline in ping plugin (#3783) 2018-02-15 20:11:07 -08:00
Daniel Nelson
6e35071c89 Update changelog 2018-02-15 20:06:01 -08:00
Jorge Canha
cd620ac144 Add keep alive support to the TCP mode of statsd (#3781) 2018-02-15 20:04:49 -08:00
Daniel Nelson
6406abbc89 Update changelog 2018-02-15 19:09:50 -08:00
Daniel Nelson
9aabf56795 Use proxy from environment in http input 2018-02-15 19:06:22 -08:00
Daniel Nelson
4ac78d5c6d Deprecate httpjson input 2018-02-15 19:06:22 -08:00
Daniel Nelson
3fe3d75bb3 Add configurable method to http input 2018-02-15 19:06:22 -08:00
Daniel Nelson
a55456b56c Check parser initialization earlier 2018-02-15 19:06:22 -08:00
Daniel Nelson
6c656d92a0 Add url tag only if not already set 2018-02-15 19:06:20 -08:00
Daniel Nelson
2ee270f274 Allow setting basic auth with empty username 2018-02-15 19:05:36 -08:00
Daniel Nelson
5b37fd3ae9 Update http input documentation 2018-02-15 19:05:36 -08:00
Nicolas Grange
f82f03b92c Add http input plugin which supports any input data format (#3546) 2018-02-15 16:00:10 -08:00
Mark Wilkinson - m82labs
42ccc9f324 Added additional SQL Server performance counters (#3770)
* Included system databases in server properties, added backup/restore throughput counter, error counters, and user settable counters.

* Added more resource governor counters.

* Added Target and Total Server Memory counters.

* Removed the c_type tag from the performance counters query, added more documentation instead.
2018-02-09 12:45:22 -08:00
Daniel Nelson
a00d5b48f8 Update changelog 2018-02-09 12:13:07 -08:00
efficks
f5ea13a9ab Fix ping plugin not reporting zero durations (#3778) 2018-02-09 12:11:19 -08:00
Daniel Nelson
32dd1b3725 Adjust time of nightly build 2018-02-07 18:37:33 -08:00
Daniel Nelson
1b0e87a8b0 Update changelog 2018-02-07 18:37:32 -08:00
Daniel Nelson
efa9095829 Add TLS support to the mesos input plugin (#3769) 2018-02-07 18:36:38 -08:00
Daniel Nelson
89974d96d7 Install new requirements for fpm gem install 2018-02-06 11:33:18 -08:00
Daniel Nelson
8c51d629eb Update changelog 2018-02-05 12:55:22 -08:00
Andy Cobaugh
ea0be51985 Add additional metrics and reverse metric names option to openldap (#3722) 2018-02-05 12:48:41 -08:00
Daniel Nelson
5639d5608d Update paho mqtt to latest release 2018-02-05 12:20:14 -08:00
Daniel Nelson
9a1d69a2ae Update changelog 2018-02-05 11:17:36 -08:00
Philipp Weber
b7a68eef56 Remove userinfo from url tag in prometheus input (#3743) 2018-02-05 11:16:00 -08:00
Daniel Nelson
be688ec761 Update sample config in contributing docs 2018-02-02 12:31:31 -08:00
Daniel Nelson
3208fc32ee Run nightly build sequentially 2018-02-02 12:31:31 -08:00
Daniel Nelson
1f87c10dd4 Fix Makefile on Windows and use in AppVeyor build (#3748) 2018-02-02 12:25:59 -08:00
Pierre Tessier
281f4d3688 Fix example source_override values in wavefront output (#3744) 2018-02-02 10:51:00 -08:00
Daniel Nelson
3dcf66aed6 Update gitignore 2018-02-01 16:18:01 -08:00
Daniel Nelson
01479af096 Update changelog 2018-02-01 16:12:57 -08:00
Daniel Nelson
23933e1139 Improve procstat readme 2018-02-01 16:12:08 -08:00
Ben Aldrich
a7571d5730 Add native Go method for finding pids to procstat (#3559) 2018-02-01 15:14:27 -08:00
Paul Myjavec
12d62e60b3 Use CircleCI 2.0 for builds (#3731) 2018-02-01 15:05:39 -08:00
Daniel Nelson
4153d2ca42 Update changelog 2018-02-01 12:15:24 -08:00
Daniel Nelson
8c8c9200e7 Update sqlserver readme; enable query_version = 2 in default config
If unset, query_version is still treated as version 1 for compatibility.
2018-02-01 12:13:49 -08:00
Mark Wilkinson - m82labs
426360d61f Add new sql server output data model (#3618) 2018-02-01 11:50:26 -08:00
John Eismeier
86e08e6ce7 Fix spelling mistakes in zipkin and apache inputs (#3741) 2018-02-01 11:15:12 -08:00
Daniel Nelson
a462b555a7 Update changelog 2018-02-01 11:13:14 -08:00
Philipp Weber
a2635573a8 Add TLS and http basic_auth to prometheus_client output (#3719) 2018-02-01 11:12:16 -08:00
Daniel Nelson
ec8e923fda Update documentation style for smart input 2018-02-01 10:51:03 -08:00
Daniel Nelson
d43e8262b7 Update changelog 2018-01-31 12:30:03 -08:00
Laurent Sesquès
7b365180d0 Add Ipset input plugin (#3346) 2018-01-31 12:25:27 -08:00
Daniel Nelson
32732d42f8 Update changelog 2018-01-30 18:08:31 -08:00
Daniel Nelson
10e51e4b49 Set path to / if HOST_MOUNT_PREFIX matches full path (#3736) 2018-01-30 18:06:53 -08:00
Daniel Nelson
3a85e7b1f0 Set release date for 1.5.2 2018-01-30 14:00:34 -08:00
Daniel Nelson
5d87ad85a1 Update changelog 2018-01-30 14:00:33 -08:00
Daniel Nelson
c28d0e1b16 Exclude master_replid fields from redis input (#3725) 2018-01-30 13:58:44 -08:00
Daniel Nelson
1b0a4e49cd Do not build nats input on freebsd
This plugin can work on freebsd, but will only build with cgo
enabled.  For now, disable this plugin to avoid this requirement.
2018-01-30 12:04:21 -08:00
Daniel Nelson
f9c48ee2f0 Update changelog 2018-01-29 14:02:44 -08:00
Philipp Weber
1b84ac08ab Add support for setting bsd source address to the ping input (#3726) 2018-01-29 14:01:00 -08:00
Daniel Nelson
bcefe90846 Update changelog 2018-01-29 12:16:15 -08:00
Ildar Svetlov
da12c64791 Add ability to select which queues will be gathered to rabbitmq input (#3702) 2018-01-29 12:14:49 -08:00
Daniel Nelson
de03ee3caa Update nats readme 2018-01-26 17:15:02 -08:00
Daniel Nelson
fbd3544a9d Add nats input to readme and changelog 2018-01-26 15:18:15 -08:00
Menno Finlay-Smits
fb947e8fe7 Add NATS Monitoring Input Plugin (#3674) 2018-01-26 15:14:54 -08:00
Daniel Nelson
5b130b6ea0 Update changelog 2018-01-26 15:03:19 -08:00
Ivan Lopez
48092ed598 Add RabbitMQ cluster and running nodes count and running node status (#3703) 2018-01-26 15:00:58 -08:00
Daniel Nelson
efb9d5b4cb Return Accumulator interface from NewAccumulator 2018-01-26 11:40:34 -08:00
Peter
c17427631d Expand on tagvalue option in postgresql_extensible (#3720) 2018-01-25 15:38:11 -08:00
Daniel Nelson
8527a1b7b8 Update changelog 2018-01-25 15:05:46 -08:00
Logan
d831dbc51d Allow running as console application on Windows (#2754) 2018-01-25 15:04:09 -08:00
Daniel Nelson
f9c0aa1e23 Update changelog 2018-01-25 13:47:21 -08:00
Pierre Tessier
3e4c91880a Add timeout to wavefront output write (#3711) 2018-01-25 13:44:25 -08:00
Daniel Nelson
899c3a2ae1 Update changelog 2018-01-22 12:06:10 -08:00
Daniel Nelson
4558aeddeb Remove graphite serializer replacement of dot with underscore in field key (#3705) 2018-01-22 12:04:16 -08:00
Daniel Nelson
36c9113917 Update changelog 2018-01-22 12:01:09 -08:00
Daniel Nelson
5270aa451c Avoid loop creation in second processor pass (#3656) 2018-01-22 11:16:07 -08:00
Daniel Nelson
91fc2765b1 Limit wait time for writes in mqtt output (#3699) 2018-01-22 11:15:13 -08:00
Daniel Nelson
ef776f120b Make error loading tls config fatal in mysql input 2018-01-19 12:16:28 -08:00
Daniel Nelson
5bac08662e Update changelog 2018-01-18 17:39:03 -08:00
Piotr Popieluch
601dc99606 Align aggregator period with internal ticker to avoid skipping metrics (#3693)
By the time aggregator.run() was called, about 600ms had already passed since `now` was set, which skewed the aggregation intervals and caused metrics to be skipped.
2018-01-18 17:37:53 -08:00
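A toy illustration of the alignment problem, assuming a 30s period and a 600ms startup delay (not the aggregator's actual ticker code):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	period := 30 * time.Second

	// Pretend run() starts ~600ms after "now" was captured.
	now := time.Now()
	startOfRun := now.Add(600 * time.Millisecond)

	// Unaligned: the window drifts by the startup delay, so metrics
	// stamped between now and startOfRun fall outside it.
	fmt.Println("unaligned:", startOfRun.Format(time.RFC3339Nano))

	// Aligned: anchor the window to the period boundary, independent
	// of when run() happened to start.
	fmt.Println("aligned:  ", now.Truncate(period).Format(time.RFC3339Nano))
}
```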
Daniel Nelson
0f55d9eba2 Update changelog 2018-01-17 15:28:35 -08:00
Piotr Popieluch
f374a295d9 Reconnect before sending graphite metrics if disconnected (#3680) 2018-01-17 15:27:24 -08:00
Daniel Nelson
548157852c Update changelog 2018-01-17 15:14:07 -08:00
Daniel Nelson
822cfbc8e8 Add support for using globs in devices list of diskio input plugin (#3687) 2018-01-17 15:12:05 -08:00
Daniel Nelson
fa5f1bf6d9 Use go-redis for the redis input (#3661) 2018-01-17 14:57:46 -08:00
Daniel Nelson
ad921a3840 Update changelog 2018-01-17 14:38:47 -08:00
Michael Boudreau
9d559292a5 Fix index out of bounds error in solr input plugin (#3683) 2018-01-17 14:37:34 -08:00
Daniel Nelson
6e24056757 Update changelog 2018-01-16 13:46:41 -08:00
Noah Crowley
87830a1c38 Ignore empty lines in Graphite plaintext (#3684) 2018-01-16 13:44:56 -08:00
atzoum
d188b78d9e Add string and boolean support to dropwizard; fix escaping of tags/fields (#3664) 2018-01-16 12:12:14 -08:00
Daniel Nelson
6e4650da3a Update changelog 2018-01-12 17:46:20 -08:00
Jacob McCann
7ab0d50116 Add container health metrics to docker input (#3666) 2018-01-12 17:43:51 -08:00
Daniel Nelson
97f6c9d8e1 Skip intermittent test on CircleCI 2018-01-12 16:49:05 -08:00
Jacob McCann
666eb47613 Listen on localhost interface in tests (#3667) 2018-01-12 12:08:19 -08:00
Daniel Nelson
90b6b760d1 Set 1.5.1 release date 2018-01-10 13:27:58 -08:00
Daniel Nelson
f3147cc44d Skip CircleCI test of tail plugin due to intermittent deadlock 2018-01-09 15:01:20 -08:00
Daniel Nelson
3cf0ba1ccf Pin crate docker image for testing 2018-01-09 13:10:40 -08:00
Daniel Nelson
2b972dcd56 Update release notes 2018-01-08 16:29:59 -08:00
Daniel Nelson
ce06d0cee0 Update changelog 2018-01-08 15:21:20 -08:00
Daniel Nelson
24ae3293bc Update changelog 2018-01-08 15:18:31 -08:00
Daniel Nelson
0ddb1d26a0 Add gjson and match to dependency license file 2018-01-08 15:15:23 -08:00
atzoum
317de40ac4 Add support for dropwizard input format (#2846) 2018-01-08 15:11:36 -08:00
Daniel Nelson
9cfa3b292b Reorder httpjson config to keep variables out of toml table 2018-01-08 15:06:58 -08:00
Daniel Nelson
0bf63a29f1 Update changelog 2018-01-05 16:04:12 -08:00
James
1d86064fb7 Use persistent connection to postgresql database (#2701) 2018-01-05 16:03:09 -08:00
Daniel Nelson
53e7537c5c Fix link to cratedb readme 2018-01-05 16:01:06 -08:00
Daniel Nelson
6dd5c3b2c0 Update changelog 2018-01-05 16:00:44 -08:00
Daniel Nelson
2938c2fa79 Add user privilege level setting to IPMI sensors (#3643) 2018-01-05 15:59:25 -08:00
Daniel Nelson
35f1b9f500 Update changelog 2018-01-05 14:56:54 -08:00
gerardocorea92
ae848e9539 Add available_entropy field to kernel input plugin (#3524) 2018-01-05 14:54:29 -08:00
Daniel Nelson
163f18f959 Update release notes for 1.5 2018-01-04 18:05:21 -08:00
Daniel Nelson
37757b7782 Add link to docs for configuring the openldap monitoring backend 2018-01-04 15:34:55 -08:00
Daniel Nelson
315fd1e987 Update changelog 2018-01-04 15:29:56 -08:00
Daniel Nelson
b0c2bb870e Escape environment variables during config toml parsing (#3637) 2018-01-04 15:28:00 -08:00
Daniel Nelson
11c6a7f9c9 Update changelog 2018-01-03 17:47:13 -08:00
Richard Elling
92acef1664 Add support for additional metrics on Linux in zfs input (#3565) 2018-01-03 17:45:48 -08:00
Daniel Nelson
5397c02570 Update changelog 2018-01-03 17:40:37 -08:00
kerams
87f1d45ee0 Add support for exchanges to RabbitMQ input (#3619) 2018-01-03 17:38:11 -08:00
Daniel Nelson
07cb749e04 Update changelog 2018-01-03 13:44:33 -08:00
kerams
acea7109d4 Fix deliver_get field in rabbitmq input (#3633) 2018-01-03 13:43:17 -08:00
Daniel Nelson
009b649a13 Update changelog 2018-01-02 16:38:20 -08:00
Daniel Nelson
b900967b78 Add wired field to mem input (#3632) 2018-01-02 16:37:11 -08:00
Daniel Nelson
81f42e8b17 Update changelog 2018-01-02 16:36:04 -08:00
Adam Johnson
56be3d3236 Reintroduce AWS credential check to cloudwatch output (#3587) 2018-01-02 16:33:15 -08:00
Daniel Nelson
a440ed8d8c Add information about how to set permissions for postfix input (#3594) 2018-01-02 14:09:14 -08:00
Daniel Nelson
06c21fb9f7 Update changelog 2017-12-28 16:24:04 -08:00
Daniel Nelson
4f7afb8cb5 Set content-type charset in influxdb output and allow it be overridden (#3593) 2017-12-28 16:22:19 -08:00
Daniel Nelson
ef6e5c5a85 Update changelog 2017-12-28 16:19:04 -08:00
Daniel Nelson
005face7c0 Fix DC/OS login expiration time (#3625) 2017-12-28 16:17:40 -08:00
Daniel Nelson
1011cd0c94 Update changelog 2017-12-28 16:12:56 -08:00
Daniel Nelson
6c075c4346 Fix name error in jolokia2_agent sample config (#3624) 2017-12-28 16:10:00 -08:00
Daniel Nelson
7f3f556b39 Fix grammar in haproxy docs 2017-12-21 18:46:03 -08:00
Daniel Nelson
6639f44c17 Fix grammar in dcos readme 2017-12-21 16:26:50 -08:00
Daniel Nelson
801a248668 Update changelog 2017-12-18 20:39:26 -08:00
kerams
496452144c Add messages_delivered_get to rabbitmq_overview (#3596) 2017-12-18 20:36:59 -08:00
Daniel Nelson
3029d58cad Update changelog 2017-12-14 16:59:58 -08:00
Jeff Ashton
fcc9c82d34 Add control over which stats to gather in basicstats aggregator (#3580) 2017-12-14 16:56:10 -08:00
Daniel Nelson
4f1ea13ebf Update bond input description 2017-12-14 16:03:29 -08:00
timhallinflux
b90ee4a43c Improve bond plugin description (#3588) 2017-12-14 15:59:20 -08:00
Daniel Nelson
4537eb2c5d Update haproxy documentation 2017-12-14 15:50:03 -08:00
Daniel Nelson
d6fd9ce738 Set release date for 1.5.0 2017-12-14 10:58:33 -08:00
Daniel Nelson
5b40173bcb Remove AWS credential check from cloudwatch output (#3583)
This method is reported to not work with IAM Instance Profiles, and we
do not want to make any calls that would require additional permissions.
2017-12-13 17:51:55 -08:00
Brian Knight
6638fc68de Update README with missing Redis measurements (#3582) 2017-12-13 11:24:48 -08:00
Antoine Augusti
9ad0297b1f Fix refType documentation for GitHub webhooks (#3579) 2017-12-13 11:22:47 -08:00
Daniel Nelson
15266bb7eb Update changelog 2017-12-13 11:17:36 -08:00
Ildar Svetlov
d935dfa9ed Don't add system input uptime_format as a counter (#3578) 2017-12-13 11:13:56 -08:00
Daniel Nelson
8785c7d78d Update changelog 2017-12-13 10:58:50 -08:00
Logan
fb3d66cdd3 Typo and sentence consistency (#3581) 2017-12-13 10:51:15 -08:00
Daniel Nelson
de180d1e56 Update changelog 2017-12-12 13:32:47 -08:00
Mike Danko
df9c7590b3 Fix various mysql data type conversions (#3554) 2017-12-12 13:22:11 -08:00
Steve Banik
d7d224d511 Fixed typo in README.md (#3574) 2017-12-12 11:21:32 -08:00
Daniel Nelson
abcad439eb Update changelog 2017-12-11 18:01:50 -08:00
Daniel Nelson
8484de6c12 Fix separation of multiple prometheus_client outputs (#3570) 2017-12-11 18:00:19 -08:00
Daniel Nelson
ab8376de03 Update exec plugin documentation 2017-12-11 17:58:06 -08:00
Daniel Nelson
ff634c5056 Update changelog 2017-12-11 15:34:52 -08:00
Daniel Nelson
14b31a2354 Add idle state to processes test 2017-12-11 15:33:44 -08:00
Ted Zlatanov
663a5b1f50 Support I (idle) process state on procfs+Linux (#3530) 2017-12-11 15:31:52 -08:00
Daniel Nelson
93d16a4603 Use auto type detection for scanned devices in smart input (#3561) 2017-12-08 18:03:12 -08:00
Daniel Nelson
88746b01c3 Update changelog 2017-12-08 18:02:01 -08:00
Daniel Nelson
37095ef47d Update sarama-cluster to latest release (#3560) 2017-12-08 17:59:06 -08:00
Daniel Nelson
4f42d8a298 Add benchmark test for single metric 2017-12-08 13:23:08 -08:00
Daniel Nelson
574034c301 Use device name instead of abs path for devices tag in smart input (#3550) 2017-12-08 13:22:41 -08:00
Daniel Nelson
654e953a89 Update changelog 2017-12-07 11:32:54 -08:00
Arkady Emelyanov
4d91162abd Add health status mapping from string to int in elasticsearch input (#3551) 2017-12-07 11:31:03 -08:00
Daniel Nelson
177e7e2c73 Log connect error only in wavefront output (#3549) 2017-12-06 14:55:29 -08:00
Daniel Nelson
d8966d5067 Fix formatting in changelog 2017-12-04 13:18:14 -08:00
Daniel Nelson
bdda6ceb70 Update next version number for dev builds 2017-12-01 11:52:46 -08:00
Daniel Nelson
ca8911fec0 Update example config 2017-12-01 11:49:07 -08:00
Daniel Nelson
2c5a5373f6 Update changelog 2017-12-01 11:42:00 -08:00
Daniel Nelson
cabe10b88a Update changelog 2017-12-01 11:23:18 -08:00
Daniel Nelson
7f66863b87 Fix HOST_MOUNT_PREFIX in docker with disk input (#3529) 2017-12-01 11:21:39 -08:00
Daniel Nelson
e400ec2b57 Update changelog 2017-11-30 18:42:14 -08:00
Daniel Nelson
44320a5421 Add option to amqp output to publish persistent messages (#3528) 2017-11-30 18:40:12 -08:00
Daniel Nelson
a9951710b3 Add time import 2017-11-29 17:05:13 -08:00
Daniel Nelson
6426bca1f8 Update changelog 2017-11-29 16:36:00 -08:00
Nathan Ferch
f92a4f528f Add input plugin for OpenBSD/FreeBSD pf (#3405) 2017-11-29 16:32:50 -08:00
Daniel Nelson
3ba5458220 Update changelog 2017-11-29 12:17:46 -08:00
Bob Shannon
beb9d7560d Add support for glob patterns in net input plugin (#3140) 2017-11-29 12:16:34 -08:00
Daniel Nelson
24d82aebe6 Update changelog 2017-11-29 12:10:56 -08:00
Daniel Nelson
7dc256e845 Update gopsutil version to include netstat fix (#3513) 2017-11-29 12:06:47 -08:00
Daniel Nelson
297897ae0a Add dcos plugin to changelog and readme 2017-11-29 11:54:33 -08:00
Daniel Nelson
414a7e34fb Add input plugin for DC/OS (#3519) 2017-11-29 11:50:32 -08:00
Patrick Hemmer
bf65e19486 Fix postfix plugin age to use ctime, not mtime (#3525) 2017-11-29 11:25:31 -08:00
Daniel Nelson
2c70958c24 Update changelog 2017-11-29 10:52:59 -08:00
Daniel Nelson
d727a6f85c Add slab to mem plugin (#3518) 2017-11-29 10:49:45 -08:00
Daniel Nelson
4e9b19f7a6 Add bond input to readme and update changelog 2017-11-28 15:19:30 -08:00
Ildar Svetlov
132fb50150 Add bond input plugin (#3424) 2017-11-28 15:16:19 -08:00
Daniel Nelson
d1ba75176d Update changelog 2017-11-28 10:10:36 -08:00
Patrick Hemmer
76240b9f18 Add postfix input plugin (#2553) 2017-11-28 10:08:41 -08:00
Daniel Nelson
06e22ee7ac Update changelog 2017-11-27 17:06:50 -08:00
Lukasz Jagiello
a18eedb970 Use deb-systemd-invoke to restart service (#3506)
From man page:
```
deb-systemd-invoke is a Debian-specific helper script which asks
/usr/sbin/policy-rc.d before performing a systemctl call.

deb-systemd-invoke is intended to be used from maintscripts to start
systemd unit files. It is specifically NOT intended to be used
interactively by users. Instead, users should run systemd and use
systemctl, or not bother about the systemd enabled state in case they
are not running systemd.
```

This PR replaces regular `systemctl` with `deb-systemd-invoke`.
2017-11-27 17:05:32 -08:00
Lukasz Jagiello
6514399baf Add shadow-utils dependency to rpm package (#3505) 2017-11-27 17:02:16 -08:00
Dylan Meissner
27994abcb5 Jolokia2 handles unordered mbean object name properties (#3504) 2017-11-27 13:43:19 -08:00
Daniel Nelson
a9ada5f65b Update changelog 2017-11-27 12:32:36 -08:00
Laurent Gosselin
f758d0c6c3 Fix global variable collection when using interval_slow option in mysql input (#3500) 2017-11-27 12:29:51 -08:00
Daniel Nelson
7442b5645f Update changelog 2017-11-20 16:50:18 -08:00
Daniel Nelson
d5bd426e0c Fix snmp tools output parsing when they contain Windows eols (#3396) 2017-11-20 16:48:30 -08:00
Daniel Nelson
154b263f14 Update changelog 2017-11-20 16:27:18 -08:00
Leandro Piccilli
92ca661662 Add support for tags in the index name in elasticsearch output (#3470) 2017-11-20 16:25:36 -08:00
Daniel Nelson
54b0b9e727 Update changelog 2017-11-20 14:40:45 -08:00
aromeyer
dc2c8791d0 Add opensmtpd input plugin (#3449) 2017-11-20 14:39:13 -08:00
Daniel Nelson
367bbdeb7e Update changelog 2017-11-20 14:37:09 -08:00
aromeyer
e544d742f9 Add unbound input plugin (#3434) 2017-11-20 14:32:06 -08:00
Daniel Nelson
393c4c6c2d Update changelog 2017-11-20 14:23:16 -08:00
Leandro Piccilli
4d1bc620b2 Add index by week number to Elasticsearch output (#3490) 2017-11-20 14:22:29 -08:00
Daniel Nelson
db8e767f1f Update changelog 2017-11-20 14:20:05 -08:00
Chris Goller
afe05fcfef Use hexadecimal ids and lowercase names in zipkin input (#3488) 2017-11-20 14:19:32 -08:00
Daniel Nelson
9422cca2cc Update changelog 2017-11-16 16:51:02 -08:00
erayaslan
a06ee58785 Use MAX() instead of SUM() for latency measurements in sqlserver (#3471) 2017-11-16 16:49:51 -08:00
Daniel Nelson
b13eea89b1 Update changelog and add particle webhook to readme 2017-11-16 16:11:20 -08:00
David G. Simmons
b813e2ecae Add Particle Webhook Plugin (#3477) 2017-11-16 16:03:19 -08:00
Pierre Fersing
8364417009 Whitelist allowed char classes for graphite output (#3473) 2017-11-15 14:44:20 -08:00
Daniel Nelson
136c15ba33 Skip test requiring cratedb server in short test mode 2017-11-13 15:22:57 -08:00
Daniel Nelson
19839c0167 Update changelog 2017-11-13 15:09:05 -08:00
Daniel Nelson
72682973bd Fix typo in error message 2017-11-13 15:07:54 -08:00
faye-sama
a411306fba Fail metrics parsing on unescaped quotes (#3409)
Before this change, the Fields() method on a metric parsed from a line
with unescaped quotes could panic. This change makes such a line
unparseable.

Fixes #3326
2017-11-13 15:06:47 -08:00
Patrick Hemmer
cbd346117a Add tests for procstat systemd & cgroup matching (#3469) 2017-11-13 14:45:31 -08:00
Daniel Nelson
181a56018f Update changelog 2017-11-13 11:02:01 -08:00
Patrick Hemmer
6ee6d55751 Add systemd unit pid and cgroup matching to procstat (#3459) 2017-11-13 10:59:27 -08:00
Daniel Nelson
ebd73b7279 Update changelog 2017-11-10 14:39:11 -08:00
Trevor Pounds
6a57395731 Compile with Go 1.9.2 (#3458) 2017-11-10 14:39:00 -08:00
Daniel Nelson
be13f69305 Update changelog 2017-11-09 14:05:36 -08:00
Felix Geisendörfer
62ec3e50d9 Add CrateDB output plugin (#3210) 2017-11-09 14:03:16 -08:00
Daniel Nelson
07297e80a8 Set 1.4.4 release date 2017-11-08 15:21:20 -08:00
Daniel Nelson
f0578b8c83 Update changelog 2017-11-07 16:48:44 -08:00
Lukasz Jagiello
493af043d3 Add Solr input plugin (#2019) 2017-11-07 16:44:09 -08:00
Daniel Nelson
47d013132a Update changelog 2017-11-07 14:37:04 -08:00
Pierre Tessier
dcff769fed Add modification_time field to filestat input plugin (#3305) 2017-11-07 14:32:48 -08:00
Daniel Nelson
5141f8a2a0 Update contributing documentation 2017-11-07 13:59:06 -08:00
Daniel Nelson
bb14589469 Update changelog 2017-11-07 13:59:06 -08:00
Daniel Nelson
b81bea658f Always ignore autofs filesystems in disk input (#3440) 2017-11-07 11:45:09 -08:00
Daniel Nelson
2c2dc97702 Update changelog 2017-11-07 11:43:15 -08:00
Daniel Nelson
cbbdf1043b Use current time if container read time is zero value (#3437) 2017-11-07 11:41:53 -08:00
Daniel Nelson
c55f285de0 Update changelog 2017-11-07 11:36:29 -08:00
Daniel Nelson
e1295c41c8 Update gopsutil to v2.17.10 (#3441) 2017-11-07 11:26:11 -08:00
Daniel Nelson
e0df62c27b Update changelog 2017-11-06 17:42:42 -08:00
Bob Shannon
fdf12ce6b4 Redact datadog API key in log output (#3420) 2017-11-06 17:41:14 -08:00
Daniel Nelson
e5a265c8c7 Revert particle webhook changes on master 2017-11-06 10:47:10 -08:00
David G. Simmons
112955a9f5 Merge branch 'master' of https://github.com/influxdata/telegraf into dn-particle-plugin 2017-11-04 09:30:17 -04:00
David G. Simmons
da0ca8a870 Revert "Undo Revert "Revert changes since 9b0af4478""
This reverts commit 6e6aefe5da.
2017-11-04 09:19:37 -04:00
David G. Simmons
6e6aefe5da Undo Revert "Revert changes since 9b0af4478"
This reverts commit 2c31345c70.
2017-11-04 09:14:52 -04:00
David G. Simmons
ae2635b547 Readme update 2017-11-04 08:43:13 -04:00
Daniel Nelson
c14478f025 Update http_listener certs 2017-11-03 21:52:45 -07:00
Daniel Nelson
2c31345c70 Revert changes since 9b0af4478 2017-11-03 21:10:56 -07:00
David G. Simmons
4a9fa7ef4b Merge branch 'master' of https://github.com/influxdata/telegraf into dn-particle-plugin 2017-11-03 13:48:45 -04:00
David G. Simmons
7db06d2aa4 Revert "New Particle Plugin"
This reverts commit ba462f5c94.
2017-11-03 13:28:54 -04:00
David G. Simmons
871fae6eb3 Revert "bug fixes and refactoring"
This reverts commit 86961cc814.
2017-11-03 13:28:35 -04:00
David G. Simmons
8e587e74f5 Revert "Update README.md"
This reverts commit 8ed00af10a.
2017-11-03 13:28:00 -04:00
David G. Simmons
440918a03b Revert "Updated README.md"
This reverts commit a6ada03b91.
2017-11-03 13:27:06 -04:00
David G. Simmons
f64b23b724 Revert "Small fixes"
This reverts commit a987118b01.
2017-11-03 13:27:06 -04:00
David G. Simmons
c11739d143 Revert "Updated Test JSON"
This reverts commit 92caf33fff.
2017-11-03 13:27:06 -04:00
David G. Simmons
883696c224 Revert "Updated Test JSON"
This reverts commit 92caf33fff.
2017-11-03 13:16:09 -04:00
David G. Simmons
0ea0519e89 Merge branch 'master' into dn-particle-plugin 2017-11-03 12:13:49 -04:00
David G. Simmons
4596ae70a9 ignore mac-files 2017-11-03 12:07:03 -04:00
David G. Simmons
92caf33fff Updated Test JSON 2017-11-03 12:07:03 -04:00
David G. Simmons
a987118b01 Small fixes
Hoping to pass CircleCI this time
2017-11-03 12:07:03 -04:00
David G. Simmons
a6ada03b91 Updated README.md 2017-11-03 12:07:03 -04:00
David G. Simmons
8ed00af10a Update README.md 2017-11-03 12:07:03 -04:00
David Norton
86961cc814 bug fixes and refactoring 2017-11-03 12:07:03 -04:00
David G. Simmons
ba462f5c94 New Particle Plugin 2017-11-03 12:07:03 -04:00
David G. Simmons
1d1d5e6089 Updated Test JSON 2017-11-02 17:21:50 -04:00
David G. Simmons
8560c2f88d Fixed Readme 2017-11-02 17:19:37 -04:00
David G. Simmons
5d135cece3 test json update 2017-11-02 14:19:01 -04:00
Daniel Nelson
9b0af4478b Remove incorrect comment about linker options 2017-11-01 18:17:51 -07:00
Daniel Nelson
26ccc1f205 Add teamspeak to readme and update changelog 2017-11-01 13:30:43 -07:00
Patric Kanngießer
76ed70340b Add Teamspeak 3 input plugin (#3315) 2017-11-01 13:27:59 -07:00
Maximilien Richer
5f215c22fe Fix typos in comments (#3415) 2017-10-31 17:00:06 -07:00
Maximilien Richer
63842d48fd Add config to input-varnish README (#3414) 2017-10-31 16:58:45 -07:00
Daniel Nelson
777b84d1dc Clarify what it means to filter metrics from processors 2017-10-30 16:32:39 -07:00
Daniel Nelson
c116af35c7 Update changelog 2017-10-30 15:35:34 -07:00
Daniel Nelson
fcfcc803b1 Use explicit schemas in mqtt_consumer input (#3401) 2017-10-30 15:33:20 -07:00
Daniel Nelson
4d5de8698b Update changelog 2017-10-30 13:53:45 -07:00
Aditya C S
23ad959d71 Add support for SSL settings to ElasticSearch output plugin (#3406) 2017-10-30 13:52:40 -07:00
Aditya C S
d9fa916711 Update docker plugin README (#3404) 2017-10-30 12:26:39 -07:00
Daniel Nelson
53b13a20d0 Update changelog 2017-10-27 11:55:17 -07:00
Maximilien Richer
ffa8a4a716 Add instance name option to varnish plugin (#3398)
This change adds a new configuration option to allow probing of
namespaced varnish instances, usually reached using the '-n' switch on
the varnish CLI.
2017-10-27 11:53:59 -07:00
Daniel Nelson
8b4708c82a Update changelog 2017-10-26 13:37:54 -07:00
Vladimir S
88ec171293 Perform DNS lookup before ping (#3385) 2017-10-26 13:35:37 -07:00
Daniel Nelson
5885ef2c1c Update changelog 2017-10-25 15:29:56 -07:00
Daniel Nelson
a519abf13f Gather concurrently from snmp agents (#3365) 2017-10-25 15:28:55 -07:00
Daniel Nelson
6ea61b55d9 Set release date for 1.4.3 2017-10-25 14:15:10 -07:00
Daniel Nelson
206397d475 Update changelog 2017-10-24 16:31:22 -07:00
Jeremy Doupe
a6797a44d5 Add history and summary types to telegraf and prometheus plugins (#3337) 2017-10-24 16:28:52 -07:00
Daniel Nelson
13c1f1524a Update changelog 2017-10-24 16:25:49 -07:00
Daniel Nelson
9a062498e7 Use golang.org/x/sys/unix instead of syscall in diskio (#3384) 2017-10-24 16:22:31 -07:00
Daniel Nelson
f64cf89db1 Update changelog 2017-10-24 15:46:47 -07:00
Daniel Nelson
6d1777276c If the connector name cannot be unquoted, use the raw value (#3371) 2017-10-24 15:36:23 -07:00
Daniel Nelson
65580759fc Update changelog 2017-10-23 12:36:31 -07:00
Sergei Smolianinov
d2f9fc7d8c Fix ACL token usage in consul input plugin (#3376) 2017-10-23 12:31:27 -07:00
Daniel Nelson
77cc071796 Update changelog 2017-10-19 17:06:14 -07:00
Daniel Nelson
4deb6238a3 Add support for decimal timestamps to ts-epoch modifier (#3358) 2017-10-19 16:36:32 -07:00
Daniel Nelson
7088d98304 Update changelog 2017-10-19 16:27:29 -07:00
Daniel Nelson
4243403432 Remove warning when JSON contains null value (#3359) 2017-10-19 16:25:58 -07:00
Mamat Rahmat
3bbc2beeed Fix small typo in documentation (#3364) 2017-10-19 14:47:40 -07:00
Daniel Nelson
0e6a70b199 Update changelog 2017-10-18 17:43:01 -07:00
Daniel Nelson
ec4efe5b03 Use labels in prometheus output for string fields (#3350) 2017-10-18 17:42:30 -07:00
Daniel Nelson
adb1f5588c Update changelog 2017-10-18 14:53:34 -07:00
Daniel Nelson
6e5915c59f Fix prometheus passthrough for existing value types (#3351) 2017-10-18 14:51:08 -07:00
Daniel Nelson
9b59cdd10e Update changelog 2017-10-18 13:57:58 -07:00
clheikes
02baa696c3 Fix TELEGRAF_OPTS expansion in systemd service unit (#3354) 2017-10-18 13:57:32 -07:00
Daniel Nelson
a4fa19252f Update changelog 2017-10-18 12:47:58 -07:00
Daniel Nelson
7ba376964c Update changelog 2017-10-18 12:25:46 -07:00
Ayrdrie
a75ab3e190 Fix mongodb input panic when restarting mongodb (#3355) 2017-10-18 12:24:30 -07:00
Daniel Nelson
2208657d73 Add release date info to FAQ 2017-10-17 10:43:53 -07:00
Daniel Nelson
9d8e935734 Update changelog 2017-10-16 14:26:12 -07:00
Pierre Fersing
f5a9d1bc75 Fix CPU system plugin gets stuck after system suspend (#3342) 2017-10-16 14:25:00 -07:00
Daniel Nelson
4b05edea53 Update changelog 2017-10-16 14:19:16 -07:00
Craig Wickesser
246ffab3e0 Add UDP IPv6 support to statsd input (#3344) 2017-10-16 14:18:36 -07:00
Daniel Nelson
3ea41e885c Update changelog 2017-10-16 11:27:00 -07:00
Daniel Nelson
1f348037b7 Fix case sensitivity issue in sqlserver query (#3336) 2017-10-16 11:26:16 -07:00
Daniel Nelson
86f19dee2b Fix typo in ipmi_sensor readme 2017-10-16 11:10:06 -07:00
Daniel Nelson
a1796989f7 Add ipmi_sensor permission documentation 2017-10-13 13:53:18 -07:00
Daniel Nelson
6b67fedfdc Remove timing sensitive riemann test 2017-10-13 11:30:30 -07:00
Daniel Nelson
5cd3327d5f Update changelog 2017-10-13 11:12:27 -07:00
Adam Johnson
bf9f94eb9d Fix cloudwatch output requires unneeded permissions (#3335) 2017-10-13 11:04:40 -07:00
Daniel Nelson
0f9f757da7 Update changelog 2017-10-12 17:26:58 -07:00
Windkit Li
2f8d0f4d47 Fix snmpwalk address format in leofs input (#3328) 2017-10-12 17:26:14 -07:00
Daniel Nelson
024dea2ff9 Update changelog 2017-10-12 15:52:01 -07:00
Daniel Nelson
fa25e123d8 Fix container name filters in docker input (#3331) 2017-10-12 15:50:09 -07:00
Patrick Hemmer
bed14e5037 Fix documented equation for diskio average queue depth (#3334) 2017-10-12 15:08:51 -07:00
Daniel Nelson
c74c29b164 Remove suggested plugins from readme.
These are confusing since we don't support all of the examples.
2017-10-11 12:56:33 -07:00
Daniel Nelson
4e0c8e6026 Set 1.4.2 release date 2017-10-10 13:29:31 -07:00
Daniel Nelson
d7ea83f39b Update readme and changelog for basicstats aggregator 2017-10-10 12:04:41 -07:00
Toni Moreno
b641f06552 Add new basicstats aggregator (#2167) 2017-10-10 12:02:01 -07:00
Pierre Tessier
c7a6d4eaa4 Fix link for wavefront plugin in changelog (#3317) 2017-10-10 11:21:46 -07:00
Daniel Nelson
61b0336d97 Use 5 second timeout overhead when waiting for ping to complete 2017-10-09 15:09:07 -07:00
Daniel Nelson
761544f56d Add HasPoint method to testutil.Accumulator 2017-10-09 15:02:57 -07:00
Daniel Nelson
0f452ad0df Document /etc/default/telegraf file 2017-10-06 16:57:57 -07:00
Daniel Nelson
4093bc98b7 Update changelog 2017-10-06 16:17:09 -07:00
Christian Meilke
75567d5b51 Add ability to limit node stats in elasticsearch input (#3304) 2017-10-06 16:16:32 -07:00
Daniel Nelson
59bb31e765 Use golang 1.9.1 2017-10-05 16:19:53 -07:00
Daniel Nelson
13c7802b84 Update changelog 2017-10-05 16:15:43 -07:00
Daniel Nelson
cce40c515a Use chunked transfer encoding in InfluxDB output (#3307) 2017-10-05 16:14:21 -07:00
Daniel Nelson
6e1fa559a3 Update changelog 2017-10-05 16:05:51 -07:00
Daniel Nelson
f56dda0ac8 Fix panic in cpu input if number of cpus changes (#3306) 2017-10-05 16:02:21 -07:00
Daniel Nelson
4fab572b6b Release buffer back to pool earlier 2017-10-05 12:12:14 -07:00
Daniel Nelson
b9f319529f Update changelog 2017-10-04 15:30:11 -07:00
Christian Meilke
0bb32570ba Add cluster health level configuration to elasticsearch input (#3269) 2017-10-04 15:29:32 -07:00
Daniel Nelson
a4ea4c7a25 Add smart to changelog and readme 2017-10-04 15:18:15 -07:00
Rickard von Essen
e69c3f9d1c Add smart input plugin for collecting S.M.A.R.T. data (#2449) 2017-10-04 15:15:58 -07:00
Daniel Nelson
002ccf3295 Update changelog 2017-10-03 15:25:19 -07:00
Daniel Nelson
a163effa6d Add support for proxy environment variables to http_response (#3302) 2017-10-03 15:22:57 -07:00
Daniel Nelson
93ff811358 Update changelog 2017-10-03 14:37:02 -07:00
Aditya C S
dd4299e925 Collect Docker Swarm service metrics in docker input plugin (#3141) 2017-10-03 14:36:26 -07:00
Daniel Nelson
b610276485 Skip invalid urls in nginx input 2017-10-03 10:54:31 -07:00
David Norton
6aee40fac1 bug fixes and refactoring 2017-10-03 09:07:15 -04:00
Pierre Tessier
79f66dc5b3 Added newline to each metric line in wavefront output (#3290) 2017-10-02 17:42:21 -07:00
Daniel Nelson
0a55ab42b4 Update changelog 2017-10-02 17:39:32 -07:00
Jimena Cabrera Notari
aba269e94c Add extra wired tiger cache metrics to mongodb input (#3281) 2017-10-02 17:38:51 -07:00
Daniel Nelson
f67350107d Update changelog 2017-10-02 17:16:38 -07:00
Daniel Nelson
8e3ed96d6f Fix case sensitivity error in sqlserver input (#3287) 2017-10-02 17:15:34 -07:00
Daniel Nelson
771fbc311a Regenerate TLS certs due to expiration 2017-10-02 15:44:55 -07:00
David G. Simmons
d7b88b10ad New Particle Plugin 2017-10-02 16:50:23 -04:00
Daniel Nelson
cdca81c999 Fix mqtt_consumer connection_timeout test 2017-10-02 12:28:31 -07:00
Daniel Nelson
ed6f438c9d Add Wavefront output to changelog and readme 2017-09-29 16:15:48 -07:00
Pierre Tessier
366f3f560c Add Wavefront output plugin (#3160) 2017-09-29 16:13:08 -07:00
Daniel Nelson
e4f5547d37 Update example config 2017-09-29 16:09:31 -07:00
Daniel Nelson
e1bf655ef9 Add deprecation notice to jolokia sample config 2017-09-29 16:08:31 -07:00
Daniel Nelson
29b6f4168c Update changelog 2017-09-29 15:59:56 -07:00
Daniel Nelson
3d62e045af Fix format of connection_timeout in mqtt_consumer (#3286) 2017-09-29 15:58:38 -07:00
Daniel Nelson
ad4a5aa7a0 Document how to exclude kubernetes annotation 2017-09-29 14:07:19 -07:00
Daniel Nelson
f2cb1da7cf Update changelog 2017-09-29 11:50:15 -07:00
François de Metz
c3d15f0aff Add support for the rollbar occurrence webhook event. (#1692) 2017-09-29 11:49:22 -07:00
David G. Simmons
b2453e3ec3 Revert "New Particle.io Plugin for Telegraf"
This reverts commit c3b11f9cfb.
Accidentally pushed to master, instead of my fork. Backing it out.
2017-09-29 12:57:13 -04:00
David G. Simmons
c3b11f9cfb New Particle.io Plugin for Telegraf
Only the tests need to be fixed.
2017-09-29 12:45:06 -04:00
Daniel Nelson
cd1791494a Update changelog 2017-09-27 11:38:43 -07:00
Daniel Nelson
402460f038 Use underscore as default opentsdb separator
Preserves backwards compatibility
2017-09-27 11:36:41 -07:00
owlet123
f85db90780 Add configurable separator for metrics and fields in opentsdb output (#3106) 2017-09-27 11:29:40 -07:00
Daniel Nelson
9bddd50a64 Add deprecation notice to jolokia plugin 2017-09-27 10:52:10 -07:00
Daniel Nelson
b8a0b8461a Update changelog and readme for jolokia2 plugin 2017-09-26 17:42:38 -07:00
Dylan Meissner
ee26191eb5 Add redesigned Jolokia input plugin (#2278) 2017-09-26 17:34:46 -07:00
Daniel Nelson
cadafa6405 Update changelog 2017-09-26 16:03:04 -07:00
Daniel Nelson
22a9ffbb9d Allow JSON data format to contain zero metrics (#3268) 2017-09-26 15:58:33 -07:00
Daniel Nelson
2e1457a496 Update changelog 2017-09-26 15:38:22 -07:00
Daniel Nelson
8614445235 Fix parsing of JSON with a UTF8 BOM in httpjson (#3267) 2017-09-26 15:36:00 -07:00
Daniel Nelson
f23d1eb078 Update changelog 2017-09-26 15:28:07 -07:00
Daniel Nelson
ef5c12bd86 Fix dmcache tests with 32bit int 2017-09-26 15:25:57 -07:00
Daniel Nelson
c013cc1497 Fix cgroup tests with 32bit int 2017-09-26 15:25:57 -07:00
Daniel Nelson
bb665cf013 Fix ceph tests with 32bit int 2017-09-26 15:25:57 -07:00
Daniel Nelson
5dff5932fd Fix nginx_plus tests with 32bit int 2017-09-26 15:25:57 -07:00
Daniel Nelson
f823fc73f6 Allow 64bit integers in kernel_vmstat 2017-09-26 15:25:57 -07:00
Daniel Nelson
fd702e6bb8 Set 1.4.1 release date in changelog 2017-09-26 14:19:02 -07:00
Daniel Nelson
50024c1860 Update changelog 2017-09-25 16:34:04 -07:00
Lukasz Jagiello
a4b8805f7f Add support for NSQLookupd to nsq_consumer (#3215) 2017-09-25 16:33:05 -07:00
James
837e6b1a32 Add additional numeric type handling tests for postgresql_extensible (#3066) 2017-09-25 10:58:10 -07:00
Agniva De Sarker
063f3f68df Improve statsd plugin perf by using a byte buffer pool (#3254) 2017-09-25 10:55:02 -07:00
Daniel Nelson
b24663b0bd Remove nightly versioning scheme 2017-09-22 18:07:08 -07:00
Daniel Nelson
366bda45c3 Remove out of date Vagrantfile 2017-09-22 17:35:58 -07:00
Daniel Nelson
c010fb1c3c Fix build versioning; add dev.docker file 2017-09-22 17:35:58 -07:00
Daniel Nelson
08c197f73a Fix golang version 2017-09-22 17:35:58 -07:00
Daniel Nelson
cafb22d145 Fix unittest for golang 1.9 2017-09-22 17:35:58 -07:00
Christian Meilke
73df179bd6 Tag original URL for k8s services in prometheus input (#3257) 2017-09-22 17:26:19 -07:00
Daniel Nelson
c3bea59f3b Update changelog 2017-09-22 11:46:47 -07:00
Daniel Nelson
52393582d2 Unlock Statsd when stopping to prevent deadlock (#3258) 2017-09-22 11:45:45 -07:00
Daniel Nelson
ce29ca78e3 Add nginx_plus to changelog and readme 2017-09-19 11:49:55 -07:00
Patrick O'Brien
6e6ed075dc Add new nginx_plus input plugin (#3214) 2017-09-19 11:46:01 -07:00
Daniel Nelson
c0a4bd99a1 Update changelog 2017-09-19 11:27:57 -07:00
Paulo Cabido
decb09e760 Add configurable metrics endpoint to prometheus output (#3245) 2017-09-19 11:27:11 -07:00
Daniel Nelson
a63f80e017 Build with go 1.9 on circleci 2017-09-18 16:30:09 -07:00
Daniel Nelson
daee48c861 Update prometheus input documentation 2017-09-18 16:21:45 -07:00
Daniel Nelson
dea8bf7ac0 Update changelog 2017-09-18 15:07:18 -07:00
Christian Meilke
292c5229bf Add support for k8s service DNS discovery to prometheus input (#3236) 2017-09-18 15:06:11 -07:00
Daniel Nelson
0048bf2120 Update changelog 2017-09-18 14:25:17 -07:00
Daniel Nelson
b8e134cd37 Fix arm64 packages contain 32-bit executable (#3246) 2017-09-18 14:22:54 -07:00
Patrick Hemmer
0339dc7faf Add process resource limits to procstat input (#3231) 2017-09-15 11:16:44 -07:00
Daniel Nelson
575a07c985 Update input plugin example readme. 2017-09-14 15:50:55 -07:00
Daniel Nelson
b94cda6b46 Update changelog 2017-09-14 15:28:47 -07:00
Trevor Pounds
73372872c2 Fix panic in statsd p100 calculation (#3230) 2017-09-14 15:27:42 -07:00
Daniel Nelson
103ae3b710 Update changelog 2017-09-14 15:22:46 -07:00
Trevor Pounds
171332c579 Add support for timing sums in statsd input (#3234) 2017-09-14 15:21:54 -07:00
Daniel Nelson
875ab3c4b7 Update changelog 2017-09-14 15:05:03 -07:00
Mark Wilkinson - m82labs
1c5ebd4be3 Fix duplicate keys in perf counters sqlserver query (#3175) 2017-09-14 15:04:13 -07:00
Daniel Nelson
103d24bfba Update changelog 2017-09-14 15:00:55 -07:00
Daniel Nelson
d5f48e3e96 Fix skipped line with empty target in iptables (#3235) 2017-09-14 14:59:28 -07:00
Daniel Nelson
7a41d2c586 Update changelog 2017-09-14 13:06:58 -07:00
Trevor Pounds
fa1982323a Fix counter and gauge metric types. (#3232) 2017-09-14 13:05:37 -07:00
Daniel Nelson
cdf63c5776 Update changelog 2017-09-13 17:31:39 -07:00
Daniel Nelson
0a8c2e0b3b Whitelist allowed char classes for opentsdb output. (#3227) 2017-09-13 17:30:52 -07:00
Daniel Nelson
9197a59cdb Update changelog 2017-09-13 17:28:33 -07:00
Dimitris Rozakis
9c8f4afa37 Respect path prefix in influx output uri (#3224) 2017-09-13 17:27:01 -07:00
Daniel Nelson
eebee9759f Fix fluentd test 2017-09-12 17:57:55 -07:00
Daniel Nelson
ee85f9275e Update changelog 2017-09-12 17:27:50 -07:00
Daniel Nelson
4e53464fe2 Remove unneeded error check 2017-09-12 17:24:57 -07:00
Adrián López
2163981872 Add timeout option for kubernetes (#3211) 2017-09-12 17:22:15 -07:00
Daniel Nelson
c5cfde667a Update changelog 2017-09-12 17:17:41 -07:00
Daniel Nelson
8a68e7424c Fix optional field types in fluentd input 2017-09-12 17:15:19 -07:00
Daniel Nelson
cc63b3b667 Update changelog 2017-09-11 12:27:39 -07:00
DanKans
5488f4b3ac Fix MQTT input exits if Broker is not available on startup (#3202) 2017-09-11 12:24:51 -07:00
Daniel Nelson
14a4b108b4 Update changelog 2017-09-11 11:57:18 -07:00
Daniel Nelson
32f313a6a6 Add polling method to logparser and tail inputs (#3213) 2017-09-11 11:56:04 -07:00
Daniel Nelson
c720200883 Update changelog 2017-09-11 11:54:18 -07:00
DanKans
f62e543003 Fix address already in use with webhooks input during reload (#3206) 2017-09-11 11:51:45 -07:00
Daniel Nelson
be83c8c8f0 Update changelog 2017-09-08 16:02:15 -07:00
Jeff Nickoloff
c809debfd4 TLS and MTLS enhancements to HTTPListener input plugin (#3191) 2017-09-08 16:01:16 -07:00
Daniel Nelson
247c2e71fd Update changelog 2017-09-08 15:36:26 -07:00
Daniel Nelson
7b08f9d099 Add support for standard proxy env vars in outputs. (#3212) 2017-09-08 15:35:20 -07:00
Daniel Nelson
d0b690f040 Fix short tests on darwin (#3099) 2017-09-08 13:03:37 -07:00
Daniel Nelson
98ca22597d Update changelog 2017-09-06 14:29:03 -07:00
Raúl Benencia
99dfc69fbb Include mount mode option in disk metrics (#3027) 2017-09-06 14:28:11 -07:00
Daniel Nelson
144862354a Update changelog 2017-09-06 14:20:38 -07:00
Daniel Nelson
402a0f16e1 Fix typo 2017-09-06 14:19:42 -07:00
Pavel Gurkov
5d4eec606f Add Kafka output plugin topic_suffix option (#3196) 2017-09-06 14:18:26 -07:00
Daniel Nelson
ab1c11b06d Add 1.4.0 release date 2017-09-05 17:14:11 -07:00
Daniel Nelson
864ea1efaf Improve question title in FAQ 2017-09-05 17:12:36 -07:00
Daniel Nelson
4fb1c3a2bc Add FAQ doc with dns resolver information 2017-09-05 13:12:11 -07:00
Daniel Nelson
9796d3c99d Use ip address for default InfluxDB ip in config
Helps with initial setup if localhost cannot be resolved due to the pure
Go resolver.
2017-09-05 12:55:21 -07:00
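A sketch of the resulting sample output config, assuming the stock `[[outputs.influxdb]]` block; the point is simply that a literal IP sidesteps the resolver:

```toml
[[outputs.influxdb]]
  ## A literal loopback address avoids resolving "localhost", which can
  ## fail under the pure Go resolver in minimal environments.
  urls = ["http://127.0.0.1:8086"]
```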
Daniel Nelson
98e784faf3 Sort metrics before comparing in graphite test 2017-09-05 12:50:30 -07:00
rdxmb
16d6011ca1 Fix docker image name in docs (#3193) 2017-09-05 11:44:51 -07:00
Daniel Nelson
f43af72785 Update changelog 2017-08-31 13:43:47 -07:00
Daniel Nelson
28d16188b3 Fix panic when handling string fields with escapes (#3188) 2017-08-30 21:16:37 -07:00
Daniel Nelson
19f3264073 Update changelog 2017-08-29 16:27:02 -07:00
Daniel Nelson
8225bd0173 Convert bool fields to int in graphite serializer 2017-08-29 16:22:03 -07:00
Seua Polyakov
3806424aab Skip non-numerical values in graphite format (#3179) 2017-08-29 15:59:38 -07:00
Daniel Nelson
ef8876b70b Move changelog item to 1.4 2017-08-28 17:17:03 -07:00
Daniel Nelson
5fd8ab36d3 Update changelog 2017-08-28 17:08:44 -07:00
Jeff Nickoloff
ac1fa05672 Added CloudWatch metric constraint validation (#3183) 2017-08-28 16:56:03 -07:00
Daniel Nelson
73d57c8a02 Update changelog 2017-08-28 16:30:51 -07:00
Nevins
95fe0e43f5 Add support for sharding based on metric name (#3170) 2017-08-28 16:24:38 -07:00
Daniel Nelson
02f7b0d030 Update changelog 2017-08-28 16:11:00 -07:00
Dylan Meissner
a9a40cbf87 HTTP headers can be added to InfluxDB output (#3182) 2017-08-28 16:08:50 -07:00
Daniel Nelson
a98496591a Update changelog 2017-08-25 18:08:33 -07:00
Ashton Kinslow
0a6541dfa8 Fix NSQ input plugin when used with version 1.0.0-compat 2017-08-25 18:06:48 -07:00
Daniel Nelson
8ecc58639a Close response bodies in http_listener test 2017-08-25 13:58:45 -07:00
Daniel Nelson
6abecd0ac7 Update changelog 2017-08-25 12:59:19 -07:00
Rickard von Essen
0502b65316 Don't fail parsing of zpool stats if pool health is UNAVAIL on FreeBSD (#3149) 2017-08-25 12:57:35 -07:00
Daniel Nelson
e400fcf5da Update changelog 2017-08-25 11:55:59 -07:00
Jan Willem Janssen
d449833de9 Fix parsing of SHM remotes in ntpq input (#3163) 2017-08-25 11:54:06 -07:00
Daniel Nelson
58751fa4df Update fail2ban documentation 2017-08-25 11:42:07 -07:00
Daniel Nelson
656ce31d98 Fix amqp_consumer data_format documentation
closes #3164
2017-08-24 13:17:29 -07:00
Daniel Nelson
485e273187 Add links to nightly builds 2017-08-23 15:42:25 -07:00
Daniel Nelson
f95c239a3f Update changelog 2017-08-23 15:21:48 -07:00
Daniel Nelson
ae24a0754b Escape backslash within string fields (#3161) 2017-08-23 15:17:26 -07:00
Daniel Nelson
f253623231 Update changelog 2017-08-23 15:16:04 -07:00
Rickard von Essen
f0db4fd901 Enable hddtemp on all platforms (#3153)
Also disables dmcache tests on non-linux.
2017-08-23 15:14:32 -07:00
Daniel Nelson
8c68bd9ddb Update changelog 2017-08-22 17:03:00 -07:00
Daniel Nelson
9fc7220c2e Don't start Telegraf on install in Amazon Linux (#3156) 2017-08-22 17:01:59 -07:00
Daniel Nelson
6597b55477 Update changelog 2017-08-22 16:55:15 -07:00
Daniel Nelson
1f4a997164 Don't retry points beyond retention policy (#3155) 2017-08-22 16:52:26 -07:00
Daniel Nelson
5224b526f4 Hide output of git describe 2017-08-22 13:32:52 -07:00
Rickard von Essen
371638ce56 Enable fail2ban on all platforms (#3151) 2017-08-22 12:58:00 -07:00
Rickard von Essen
53c5d3a290 Enable chrony for all platforms (#3152) 2017-08-22 11:49:51 -07:00
Daniel Nelson
b480022330 Update config directory documentation 2017-08-22 11:33:26 -07:00
Daniel Nelson
ccf17a9f93 Cache intermediate objects during build 2017-08-21 17:26:55 -07:00
Chris Goller
13a6b917c3 Add JSON input support to zipkin plugin (#3150) 2017-08-21 17:24:54 -07:00
Daniel Nelson
1f1e9cc49f Add win_services to the readme 2017-08-18 17:57:30 -07:00
Daniel Nelson
70c2b83f00 Update histogram aggregator documentation (#3133) 2017-08-18 13:24:05 -07:00
Daniel Nelson
4de264ffc8 Remove version test 2017-08-18 11:08:48 -07:00
Daniel Nelson
36c2c88fd2 Update example config 2017-08-17 18:54:06 -07:00
Daniel Nelson
e31d91f0f9 Add queues to rabbitmq documentation (#3135) 2017-08-17 18:52:27 -07:00
Daniel Nelson
3006ccbf2f Update master for 1.5 development 2017-08-16 16:54:15 -07:00
Daniel Nelson
8b588ea37f Update sample config 2017-08-16 16:46:40 -07:00
Daniel Nelson
7608251633 Add tomcat input to changelog and readme 2017-08-16 15:36:56 -07:00
Daniel Nelson
1e9d7cd6e9 Add error status handle to tomcat input 2017-08-16 15:33:47 -07:00
mlindes
a91457e001 Add tomcat input plugin (#3112) 2017-08-16 15:33:20 -07:00
Daniel Nelson
fd3a9bf46a Update changelog 2017-08-16 12:26:00 -07:00
Daniel Nelson
ca394fcfb2 Discard logging from tail library (#3128) 2017-08-16 12:06:07 -07:00
Daniel Nelson
3819607511 Allow using system plugin in Windows (#3127) 2017-08-16 12:05:46 -07:00
Daniel Nelson
eb0215c382 Remove log message on ping timeout (#3126) 2017-08-16 11:59:41 -07:00
Daniel Nelson
09153c815c Move http_response headers to end of configuration.
If the subtable comes before other options, those options will be
parsed as part of the subtable.
2017-08-15 11:50:08 -07:00
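The TOML semantics behind this: every key after a subtable header belongs to that subtable, so the headers table has to come last. A hedged sketch using common http_response options:

```toml
[[inputs.http_response]]
  address = "http://localhost"
  ## Scalar options must appear before any subtable header; anything
  ## placed after [inputs.http_response.headers] is parsed into the
  ## subtable instead of the plugin itself.
  response_timeout = "5s"

  [inputs.http_response.headers]
    Host = "github.com"
```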
Daniel Nelson
9bc13f143e Test for nil metric before reading tags in logparser 2017-08-15 11:43:16 -07:00
Daniel Nelson
032348c7a5 Update changelog 2017-08-14 14:51:28 -07:00
Bob Shannon
5fbdd09aaf Add gzip content-encoding support to influxdb output (#2978) 2017-08-14 14:50:15 -07:00
Daniel Nelson
7d5dae5a08 Improve apache input docs (#3120) 2017-08-11 17:50:51 -07:00
Daniel Nelson
54be037911 Use double hyphen in cli examples 2017-08-11 16:26:54 -07:00
Daniel Nelson
5003809e97 Merge LDFLAGS from env into build 2017-08-11 16:26:54 -07:00
G-Research
1b50f14d55 Build NTPQ input on Windows (#3117) 2017-08-11 13:36:25 -07:00
Patrick Hemmer
b0109b3550 Add weighted_io_time to diskio input (#3119) 2017-08-11 11:49:42 -07:00
Daniel Nelson
257b460f61 Update changelog 2017-08-10 12:41:09 -07:00
Daniel Nelson
287a44de5e Skip compilation of logparser and tail on solaris (#3113)
Allows compilation for Solaris
2017-08-10 12:36:11 -07:00
Daniel Nelson
73897d1f1c Update changelog 2017-08-10 10:22:11 -07:00
Daniel Nelson
1e2d594af0 Converge to typed value in prometheus output (#3104) 2017-08-10 10:19:28 -07:00
Daniel Nelson
83c003e594 Update changelog 2017-08-09 11:48:36 -07:00
Daniel Nelson
84ce9629a8 Tweak formatting of varnish README 2017-08-09 11:48:12 -07:00
Daniel Nelson
3c14b46f6f Fix ordering of all target 2017-08-09 11:47:55 -07:00
Benjamin Stromski
8a2373e8c8 Add option to run varnish under sudo (#3097) 2017-08-09 11:38:54 -07:00
Daniel Nelson
cb04fa1e9c Add diskio %util sample query 2017-08-09 11:28:27 -07:00
Seva Poliakov
92af42a847 Remove tag_env duplicate from docker README (#3109) 2017-08-09 10:21:22 -07:00
Daniel Nelson
bceb020d72 Update changelog and readme 2017-08-08 11:50:16 -07:00
Rodolphe Blancho
d9deb266df Add salesforce input plugin (#3075) 2017-08-08 11:48:01 -07:00
Slawomir Skowron
f3435f1c59 Add TCP listener for statsd input (#2293) 2017-08-08 11:41:26 -07:00
Daniel Nelson
f9573ad969 Remove Godeps_windows from build.py 2017-08-07 17:43:06 -07:00
Daniel Nelson
40aacd9046 Fix artifact redirection 2017-08-07 17:41:52 -07:00
Daniel Nelson
5e73f3e816 Only upload nightly if on master branch 2017-08-07 17:24:35 -07:00
Daniel Nelson
a1e7a5f474 Upload as nightly builds if PACKAGE set 2017-08-07 17:16:34 -07:00
Daniel Nelson
828c5817f9 Update changelog 2017-08-07 16:18:01 -07:00
Daniel Nelson
3e27134872 Add path tag to logparser containing path of logfile (#3098) 2017-08-07 16:16:31 -07:00
Daniel Nelson
1fb5373962 Build releases with -w -s ldflags 2017-08-07 15:47:20 -07:00
Daniel Nelson
75e6ebcf93 Update changelog 2017-08-07 14:39:22 -07:00
Vlasta Hajek
e21f2de8b8 Add Windows Services input plugin (#3023) 2017-08-07 14:36:15 -07:00
Daniel Nelson
795f02ab88 Cleanup Makefile (#3089) 2017-08-03 11:54:05 -07:00
Daniel Nelson
360d03e301 Update changelog and readme 2017-08-02 18:02:41 -07:00
Daniel Nelson
137b312fa9 Add Zipkin input plugin (#3080) 2017-08-02 17:58:26 -07:00
Daniel Nelson
ce12913bc2 Update precision documentation and examples
Precision is no longer used by the InfluxDB output.

closes #3079
2017-08-01 15:02:36 -07:00
Daniel Nelson
d82c5062b8 Add Appveyor continuous integration (#3074) 2017-07-31 16:12:09 -07:00
Daniel Nelson
6666e6a5a7 Update changelog 2017-07-31 11:37:32 -07:00
Vladislav Mugultyanov
9c0aadf445 Add histogram aggregator plugin (#2387) 2017-07-31 11:33:51 -07:00
Daniel Nelson
3bd14ed229 Update changelog 2017-07-31 11:30:27 -07:00
DanKans
5e95367f6c Sanitize password from couchbase metric (#3033) 2017-07-31 11:29:14 -07:00
Jeff Ashton
c31e7d0b91 Fix win_perf_counters tests (#3068) 2017-07-31 11:03:26 -07:00
Oscar Sironi
f8c84302a4 Add config file path troubleshooting advice for Windows (#3071) 2017-07-31 10:58:12 -07:00
Daniel Nelson
9143670d6e Update changelog 2017-07-27 17:19:33 -07:00
Daniel Nelson
f0bd69d904 Add tls options to docker input (#3063) 2017-07-27 17:18:44 -07:00
Daniel Nelson
7179290dea Update changelog 2017-07-27 15:21:52 -07:00
Daniel Nelson
c4297f40ad Allow iptable entries with trailing text (#3060) 2017-07-27 15:21:06 -07:00
Daniel Nelson
0d4c954e01 Update changelog 2017-07-27 15:15:11 -07:00
Daniel Nelson
d6cf9f4f30 Fix docker memory and cpu reporting in Windows (#3043) 2017-07-27 15:12:29 -07:00
Daniel Nelson
5f88be022c Add circleci parameter to build packages 2017-07-26 17:13:50 -07:00
Daniel Nelson
284ab79a37 Set 1.3.5 release date 2017-07-26 15:53:49 -07:00
Daniel Nelson
2bd6c80506 Update changelog 2017-07-25 17:12:45 -07:00
Daniel Nelson
0ca936a12e Default to localhost if zookeeper has no servers set (#3056) 2017-07-25 17:08:32 -07:00
Daniel Nelson
a26fc52181 Fix panic in logparser if file cannot be opened (#3055) 2017-07-25 17:08:03 -07:00
Daniel Nelson
83f575fcea Add redis_version field to redis input (#3054) 2017-07-25 17:07:43 -07:00
Daniel Nelson
ffd1f25b75 Update changelog 2017-07-25 16:09:48 -07:00
Daniel Nelson
1658404cea Update changelog 2017-07-25 15:43:13 -07:00
Daniel Nelson
82ea04f188 Fix prometheus output cannot be reloaded (#3053) 2017-07-25 15:41:18 -07:00
xin053
273d0b85b0 Correct spelling of toml field in mysql input (#3051) 2017-07-25 10:57:27 -07:00
Théophile Helleboid - chtitux
f3917ec5ff Fix typo in postgresql_extensible/README.md (#3052) 2017-07-25 10:39:14 -07:00
Daniel Nelson
428455e032 Update changelog 2017-07-24 18:26:29 -07:00
Daniel Nelson
573bd4aa32 Start first aggregator period at startup time (#3050)
Fixes issue where metrics collected immediately after startup would not
be aggregated.
2017-07-24 18:25:05 -07:00
Oskar
ab5205f8c3 Fix go vet under windows (#3046) 2017-07-24 12:36:33 -07:00
Daniel Nelson
85aa212467 Update changelog 2017-07-21 16:57:28 -07:00
Daniel Nelson
840d19db35 Add network option to dns_query (#3042) 2017-07-21 16:56:08 -07:00
Daniel Nelson
1c267e9b16 Update changelog 2017-07-21 15:46:22 -07:00
Andy Cobaugh
1ff6e92193 Add input plugin for OpenLDAP (#2612) 2017-07-21 15:44:20 -07:00
Daniel Nelson
c82c0e596b Update changelog 2017-07-21 14:31:25 -07:00
Daniel Nelson
31ce98fa91 Don't match pattern on any error (#3040)
This prevents a pattern with no wildcards from matching in case
permission is denied.
2017-07-21 14:28:14 -07:00
Daniel Nelson
4d66db1603 Update changelog 2017-07-21 14:26:39 -07:00
Yann Cézard
681d20083a Only report cpu usage for online cpus in docker input (#3035) 2017-07-21 14:25:17 -07:00
Daniel Nelson
4ee74ff54b Document GNU make requirement 2017-07-21 11:15:00 -07:00
Daniel Nelson
16073e4172 Update changelog 2017-07-21 10:57:39 -07:00
Daniel Nelson
3c204d409d Line wrap documentation 2017-07-21 10:57:12 -07:00
DanKans
d903a9142d Fix filtering when both pass and drop match an item (#3036)
Adjust the logic in the functions responsible for passing metrics so
that metrics are processed correctly when pass and drop are defined
together.
2017-07-21 10:53:57 -07:00
Daniel Nelson
a2d4453269 Update changelog 2017-07-19 13:09:49 -07:00
DanKans
34c042c7dc Fix combined tagdrop/tagpass filtering (#3031) 2017-07-19 13:08:40 -07:00
Daniel Nelson
4dfe2312d0 Switch skipped kafka test 2017-07-18 18:18:57 -07:00
Daniel Nelson
c740dce36d Update download information in readme 2017-07-18 13:54:38 -07:00
Daniel Nelson
475a926d43 Update changelog 2017-07-18 11:03:07 -07:00
DanKans
d2626f1da6 Fix ntpq parse issue when using dns_lookup (#3026) 2017-07-18 11:01:08 -07:00
soldierkam
f5a8415c78 Add read timeout to socket_listener 2017-07-17 18:34:36 -07:00
Daniel Nelson
1d416a4213 Remove command in example output 2017-07-17 15:08:17 -07:00
Daniel Nelson
731ab9773d Update changelog 2017-07-17 12:01:35 -07:00
Daniel Nelson
d8f7b76253 Prevent startup if intervals are 0 2017-07-17 11:58:47 -07:00
Daniel Nelson
dbe2f79019 Update changelog 2017-07-14 10:45:32 -07:00
Bob Shannon
ef63908541 Add result_type field to net_response input plugin (#2990) 2017-07-14 10:43:36 -07:00
Daniel Nelson
27e47614c6 Add credits for new plugins to changelog 2017-07-13 16:14:18 -07:00
Daniel Nelson
dc4a133b11 Update changelog 2017-07-13 16:00:09 -07:00
DanKans
f4d67d8c3c Add fluentd input plugin (#2661) 2017-07-13 15:58:20 -07:00
Daniel Nelson
785798611e Update changelog 2017-07-13 15:39:45 -07:00
Daniel Nelson
b165ce4cd5 Prevent possible deadlock when using aggregators (#3016)
Looping the metrics back through the same channel could result in a
deadlock. By using a new channel and locking the processor we can
ensure that all stages can make continual progress.
2017-07-13 15:34:21 -07:00
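A minimal Go illustration of the shape of that fix (not Telegraf's actual channel topology; names are hypothetical): the aggregator writes to a dedicated output channel instead of looping results back into its own input.

```go
package main

import "fmt"

// aggregate drains `in` and emits results on a dedicated `out` channel.
// The buggy pattern sent results back into `in`; once the buffer filled,
// the goroutine blocked on its own send and no stage could progress.
func aggregate(in <-chan string, out chan<- string) {
	for m := range in {
		out <- "aggregated: " + m
	}
	close(out)
}

func main() {
	in := make(chan string, 1)
	out := make(chan string, 1)
	go aggregate(in, out)
	in <- "cpu usage_idle=99"
	close(in)
	for m := range out {
		fmt.Println(m)
	}
}
```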
Daniel Nelson
d9d1ca5a46 Add release date for 1.3.4 2017-07-12 17:15:38 -07:00
Daniel Nelson
2c10806fef Update changelog 2017-07-12 12:04:43 -07:00
Daniel Nelson
5d2c093105 Prevent Write from being called concurrently (#3011) 2017-07-12 12:03:23 -07:00
Daniel Nelson
f68bab1667 Update changelog 2017-07-11 15:55:44 -07:00
Daniel Nelson
1388e2cf92 Do not allow metrics with trailing slashes (#3007)
It is not possible to encode a measurement, tag, or field whose last
character is a backslash, because a trailing backslash cannot be
escaped. Because of the tight coupling between line protocol and the
internal metric model, prevent metrics like this from being created.

Measurements with a trailing backslash are not allowed and the point
will be dropped. Tags and fields with a trailing backslash will be
dropped from the point.
2017-07-11 15:54:38 -07:00
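The reason a trailing backslash is unencodable: in line protocol, `\` gains meaning from the byte after it, so a final `\` would escape the delimiter that ends the value. A hypothetical guard for this check (not Telegraf's exact code) is just:

```go
// hasTrailingBackslash reports whether a measurement, tag, or field
// value ends in `\`, which cannot be round-tripped through line
// protocol because the final backslash would escape the delimiter
// that follows it.
func hasTrailingBackslash(s string) bool {
	return len(s) > 0 && s[len(s)-1] == '\\'
}
```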
Daniel Nelson
af318f4959 Update changelog 2017-07-11 14:10:09 -07:00
JSH
9f244cf1ac Fix chrony plugin does not track system time offset (#2989) 2017-07-11 14:08:40 -07:00
Daniel Nelson
885aa8e6e1 Update changelog 2017-07-10 19:07:28 -07:00
Daniel Nelson
945446b36f Fix handling of escapes within fieldset (#3003)
Line protocol does not require or allow escaping of backslash; the only
requirement for a byte to be escaped is that it is an escapable
character immediately preceded by a backslash.
2017-07-10 19:05:18 -07:00
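A worked example of that rule in a string field: the backslash is kept verbatim unless the byte after it is escapable.

```text
weather value="C:\dir\"name\""

`\d` stays as-is ('d' is not an escapable character inside a string
field), while each `\"` unescapes to a literal quote, so the field
parses as:  C:\dir"name"
```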
Daniel Nelson
4209ebfa6e Update changelog 2017-07-10 12:23:16 -07:00
Daniel Nelson
79f8ed874a Update elastic version to 5.0.41 (#2999) 2017-07-10 12:18:56 -07:00
Daniel Nelson
739d97639a Update dependencies 2017-07-10 12:01:22 -07:00
Wesley Merkel
ac8e28f436 Add link to Graylog input to README.md (#2995) 2017-07-10 11:22:37 -07:00
Daniel Nelson
2740a3ba44 Update changelog 2017-07-05 14:29:59 -07:00
Song Wenhao
0f850400f2 Display error message if prometheus output fails to listen (#2984) 2017-07-05 14:28:44 -07:00
Daniel Nelson
74a764d549 Update changelog 2017-06-29 16:17:08 -07:00
Aleksey Shirokih
a8a637809e Change default prometheus_client port (#2973) 2017-06-29 14:03:42 -07:00
Daniel Nelson
75dbf2b0f8 Set release date for 1.3.3 2017-06-28 13:05:06 -07:00
Daniel Nelson
90909ae708 Fix build on Windows (#2972) 2017-06-27 16:31:28 -07:00
Daniel Nelson
d40e441240 Use git sha1 as version if not tagged (#2969) 2017-06-27 13:24:06 -07:00
Adam Perlin
cc3d420551 Fix several bugs in minecraft input (#2970) 2017-06-27 13:14:07 -07:00
Daniel Nelson
f2bb4acd4a Update changelog 2017-06-26 15:25:06 -07:00
Bob Shannon
a7595c918a Fix panic in elasticsearch input if cannot determine master (#2954) 2017-06-26 15:23:53 -07:00
Daniel Nelson
a52f90122b Update changelog 2017-06-26 15:15:31 -07:00
Bob Shannon
d217cdc1a6 Add optional usage_active and time_active CPU metrics (#2943) 2017-06-26 15:13:38 -07:00
Daniel Nelson
d5b6f92f3f Log aerospike field value on error 2017-06-26 14:48:22 -07:00
Daniel Nelson
1a636abaaf Update changelog 2017-06-26 14:31:17 -07:00
vodolaz095
1fdbfa4719 Add support for RethinkDB 1.0 handshake protocol (#2963)
Allow the rethinkdb input plugin to work with RethinkDB 2.3.5+ databases that require username/password authorization and the v1.0 handshake protocol

* remove top level header not required in sample config
2017-06-26 14:29:48 -07:00
Daniel Nelson
22fc130e97 Update changelog 2017-06-23 16:56:36 -07:00
Ayrdrie
a726579d50 Add Minecraft input plugin (#2960) 2017-06-23 16:54:12 -07:00
Daniel Nelson
d774c2a170 Update changelog 2017-06-23 11:13:00 -07:00
MatthewCh
6d5bb35f84 Support HOST_PROC in processes and linux_sysctl_fs inputs (#2924) 2017-06-23 11:11:33 -07:00
Daniel Nelson
e028f10586 Update changelog 2017-06-23 11:04:13 -07:00
Daniel Nelson
9276318faf Fix bug parsing default timestamps with modified precision (#2949) 2017-06-23 10:59:04 -07:00
Daniel Nelson
82a04d904d Use strings.Join in statsd input (#2947) 2017-06-21 16:24:23 -07:00
Daniel Nelson
364da9a83d Update changelog 2017-06-21 12:46:57 -07:00
grugrut
ca9cec2c84 Add input plugin for Fail2ban (#2875) 2017-06-21 12:42:13 -07:00
Daniel Nelson
9211985c63 Update changelog 2017-06-21 12:39:09 -07:00
Daniel Nelson
929ba0a637 Remove label value sanitization in prometheus output (#2939) 2017-06-21 12:36:29 -07:00
Daniel Nelson
dcdcb70cb1 Update changelog 2017-06-19 11:52:53 -07:00
Eugene Shilin
cb5a12de3d Add standard SSL options to mysql input (#2933) 2017-06-19 11:42:43 -07:00
Artem Kovardin
193e8fa5ad More explicit 404 error in cassandra input (#2936) 2017-06-19 11:06:49 -07:00
trastle
00b37a7c0d Update README for Prometheus Client Output (#2452) 2017-06-19 11:04:08 -07:00
Daniel Nelson
736322dfc9 Set default ping count in Windows
fixes #2934
2017-06-16 13:39:55 -07:00
Daniel Nelson
ba364988de Document that ping_interval is non-linux only 2017-06-16 13:32:04 -07:00
Daniel Nelson
a729a44284 Update changelog 2017-06-16 13:18:27 -07:00
Daniel Nelson
3ecfd32df5 Allow dos line endings in tail and logparser (#2920)
Parsing DOS line-ending delimited line protocol is still illegal in
most cases.
2017-06-16 13:16:48 -07:00
Daniel Nelson
ea1888bd26 Update changelog 2017-06-16 12:06:40 -07:00
Simone Rotondo
674c24f987 Add HTTP Proxy support to influxdb output (#2929) 2017-06-16 12:05:08 -07:00
Daniel Nelson
ca72df5868 Update 1.3.2 release date 2017-06-14 12:16:47 -07:00
Daniel Nelson
ea787b83bf Update changelog 2017-06-13 18:07:12 -07:00
Daniel Nelson
949072e8dc Ensure prometheus metrics have same set of labels (#2857) 2017-06-13 18:04:26 -07:00
Daniel Nelson
246f342e6a Update changelog 2017-06-13 17:19:33 -07:00
Daniel Nelson
619b5d4c14 Change node_name to be a tag in aerospike input (#2918) 2017-06-13 17:09:38 -07:00
Daniel Nelson
b0efc22140 Update changelog 2017-06-13 14:10:33 -07:00
Heston Kan
5d1efdbfda Add min/max response time on linux/darwin to ping (#2908) 2017-06-13 14:09:17 -07:00
Daniel Nelson
e3ccd473d2 Update changelog 2017-06-13 13:44:07 -07:00
Dheeraj Dwivedi
f0cbfe4d67 Add secure connection support to graphite output (#2602) 2017-06-13 13:42:11 -07:00
Daniel Nelson
40d8e582ee Update changelog 2017-06-12 18:32:50 -07:00
Daniel Nelson
02b55fe77f Update aws-sdk-go dependency to latest release. (#2912) 2017-06-12 18:31:27 -07:00
Daniel Nelson
0c53de6700 Update changelog 2017-06-08 16:55:27 -07:00
Daniel Nelson
b277e6e2d7 Fix support for mongodb/leofs urls without scheme (#2900)
This was broken by changes to url.Parse in Go 1.8. This change allows
the string but prompts the user to move to the correct URL format.
2017-06-08 16:52:01 -07:00
Daniel Nelson
de4a312eba Update changelog 2017-06-08 13:20:44 -07:00
Matteo Cerutti
4b3b16ef1a Add wildcard support for container inclusion/exclusion (#2793) 2017-06-08 13:17:31 -07:00
Daniel Nelson
4c534433aa Skip kafka_consumer_integration_test due to issue on CircleCI 2017-06-07 18:31:52 -07:00
Daniel Nelson
f9447d01d4 Add release note to changelog regarding kafka_consumer 2017-06-07 18:27:12 -07:00
Seuf
2092443cd7 Add Kafka 0.9+ consumer support (#2487) 2017-06-07 18:22:28 -07:00
Bob Shannon
1c73caba04 Add SSL/TLS support to nginx input plugin (#2883) 2017-06-07 17:52:10 -07:00
Daniel Nelson
84dbf8bb25 Update changelog 2017-06-07 13:46:06 -07:00
Daniel Nelson
a275e6792a Fix metric splitting edge cases (#2896)
Metrics needing one extra byte to fit the output buffer would not be split, so we would emit lines without a line ending. Metrics which overflowed by exactly one field length would be split one field too late, causing truncated fields.
2017-06-07 13:37:54 -07:00
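A hedged sketch of the first edge case (hypothetical helper, not the actual buffer code): the trailing newline has to be counted when testing whether a serialized metric still fits the output buffer.

```go
// fits reports whether a serialized metric line, plus its required
// trailing '\n', still fits in the remaining output buffer. Omitting
// the +1 is the off-by-one that emitted lines without a line ending.
func fits(line []byte, remaining int) bool {
	return len(line)+1 <= remaining
}
```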
Daniel Nelson
de7fb2acfe Update changelog 2017-06-06 13:55:11 -07:00
Frederick Roth
91f2764cd5 Add result_type field for http_response input (#2814) 2017-06-06 13:39:07 -07:00
Daniel Nelson
4e91b18bbe Update changelog 2017-06-06 11:56:19 -07:00
Mariusz Brzeski
56a7ffe0e4 Fix timeout option in Windows ping input sample configuration (#2885) 2017-06-06 11:55:01 -07:00
Daniel Nelson
f9462d4fff Update changelog 2017-06-05 14:47:34 -07:00
Sebastian Borza
035905d65e Add timezone support to logparser timestamps (#2882) 2017-06-05 14:45:11 -07:00
Daniel Nelson
a47e6e6efe Update changelog 2017-06-05 12:46:50 -07:00
Daniel Nelson
5bab4616ff Fix udp metric splitting (#2880) 2017-06-05 12:44:29 -07:00
Daniel Nelson
37e01808b5 Set 1.3.1 release date 2017-05-31 15:00:31 -07:00
Daniel Nelson
0b6db905ff Generate sha256 hashes when packaging 2017-05-31 12:29:39 -07:00
Daniel Nelson
9529199a44 Update changelog 2017-05-30 17:40:37 -07:00
Daniel Nelson
be03abd464 Fix length calculation of split metric buffer (#2869) 2017-05-30 17:38:32 -07:00
Daniel Nelson
04aa732e94 Update changelog 2017-05-30 11:04:39 -07:00
Steve Nardone
e7f9db297e Fix panic in mongo input (#2848) 2017-05-30 11:02:26 -07:00
Daniel Nelson
24ea9fdc4d Update changelog 2017-05-26 12:12:18 -07:00
Matteo Cerutti
02d168705c MySQL input: log and continue on field parse error (#2855) 2017-05-26 12:09:43 -07:00
Daniel Nelson
7d7206b3e2 Update changelog 2017-05-25 16:20:29 -07:00
Daniel Nelson
03ca3975b5 Update gopsutil version
fixes #2856
2017-05-25 16:11:49 -07:00
Daniel Nelson
e1088b9eee Update changelog 2017-05-25 13:39:16 -07:00
Daniel Nelson
f47924ffc5 Fix influxdb output database quoting (#2851) 2017-05-25 13:25:52 -07:00
Olivier Lambert
a96f85c847 Add documentation for fetching metrics on Caddy HTTP and Prometheus (#2853) 2017-05-25 13:07:49 -07:00
Sylvain Boily
9148871608 Documentation privilege requirements for specific procstat metrics (#2787) 2017-05-25 13:06:27 -07:00
Matteo Cerutti
7d198f0a68 Add timeout option to ipmi_sensor plugin - solves #2817 (#2818) 2017-05-22 13:41:34 -07:00
Daniel Nelson
1459fab4d6 Remove changelog item from pull request template
Person who merges PR is now expected to update the CHANGELOG.
2017-05-22 12:06:48 -07:00
Daniel Nelson
b0bd4d55f5 Update CHANGELOG with fixed issue #1137 2017-05-22 12:01:22 -07:00
Steven Burgart
9ab688d62c Fix multiple plugin loading in win_perf_counters (#2800) 2017-05-22 11:58:00 -07:00
Daniel Nelson
8fdc2aec80 Update dependency license file 2017-05-19 18:03:49 -07:00
Lukasz Jagiello
91690b1d3e Consul plugin README typo (#2829) 2017-05-19 11:37:31 -07:00
Daniel Nelson
c61cd73eff Update changelog 2017-05-18 18:11:49 -07:00
rsingh2411
93e638d63e Add Docker container environment variables as tags. Only whitelisted #2580 (#2581) 2017-05-18 16:58:34 -07:00
mced
501c22478e [enh] set db_version at 0 if query version fails (#2819) 2017-05-18 13:52:56 -07:00
Daniel Nelson
7155e90f66 Update changelog for #2815 2017-05-16 17:37:51 -07:00
Timo Mihaljov
c53d9fa9b7 Handle process termination during read from /proc (#2816)
Fixes #2815.
2017-05-16 17:33:35 -07:00
Frederick Roth
ac5ac3161f Fixed inconsistency between HasIntField and IntField (#2813) 2017-05-16 15:25:30 -07:00
Daniel Nelson
bfeb3020a3 Add release date for 1.3.0 2017-05-15 19:52:35 -07:00
Daniel Nelson
b01ecdccff Add back the changelog entry for 2141 2017-05-15 12:54:03 -07:00
Daniel Nelson
da99777f6f Only split metrics if there is an udp output (#2799) 2017-05-12 15:34:05 -07:00
Zack Zatkin-Gold
dd537b3382 Fix telegraf example arguments (#2788)
Many of the examples in the documentation use a single dash for
command line arguments, but the telegraf executable uses two dashes.

There are also some inconsistencies with the ordering of the command
line argument examples.  I've ordered them so that the examples will
show: config, config-directory, input-filter, test
2017-05-12 15:22:29 -07:00
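The double-dash form used in the reordered examples looks like this (flag values are illustrative):

```sh
telegraf --config telegraf.conf \
  --config-directory /etc/telegraf/telegraf.d \
  --input-filter cpu:mem --test
```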
Sebastian Borza
f74687dcc0 split metrics based on UDPPayload size (#2795) 2017-05-12 14:45:50 -07:00
Daniel Nelson
a47aa0dcc2 Merge branch 'reuse-transport' 2017-05-10 18:19:21 -07:00
Daniel Nelson
17d883c602 Ensure keep-alive is not used in http_response input.
Using Keep-Alive would change the timing for already established
connections. Prior to this commit, Keep-Alive was in effect only when
using a response_string_match, due to a failure to close the request
body.
2017-05-10 14:40:55 -07:00
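The standard-library knob that enforces this is `DisableKeepAlives` on `http.Transport`; a minimal sketch, not the plugin's exact wiring:

```go
import (
	"net/http"
	"time"
)

// newProbeClient returns a client that opens a fresh connection for
// every request, so each probe's response time includes connection
// setup rather than reusing a kept-alive connection.
func newProbeClient(timeout time.Duration) *http.Client {
	return &http.Client{
		Timeout:   timeout,
		Transport: &http.Transport{DisableKeepAlives: true},
	}
}
```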
Daniel Nelson
a1446a60f7 Update changelog 2017-05-10 13:11:33 -07:00
Daniel Nelson
1931aac284 Fix http_response input creation of transport on every gather 2017-05-09 16:23:38 -07:00
Daniel Nelson
b88eb0f59d Fix prometheus input creation of transport on every gather 2017-05-09 16:21:49 -07:00
Daniel Nelson
e7ad2d0463 Fix apache input creation of transport on every gather. 2017-05-09 16:19:56 -07:00
Daniel Nelson
c28ffb11cb Merge branch 'update-readme' 2017-05-09 13:50:19 -07:00
Daniel Nelson
018fd5ce5b Add missing plugins to README 2017-05-09 13:50:12 -07:00
Daniel Nelson
cd0ec0185a Update contributing section
Hoping this will encourage more non-plugin contributions.
2017-05-09 13:50:12 -07:00
Adrian Sadłocha
8124cfa3ed Improve PostgreSQL plugin documentation (#2777) 2017-05-09 12:58:43 -07:00
Lukasz Jagiello
5af985ef5f Add support for self-signed certs to InfluxDB input plugin (#2773) 2017-05-08 15:20:24 -07:00
Sylvain Boily
1ebd1aaa41 Systemd does not see all shutdowns as failures (#2716) 2017-05-08 11:48:29 -07:00
Daniel Nelson
de3f52b990 Update cloudwatch documentation
Mention that some metrics are available only at larger intervals than 5
minutes.  Update dead links to new locations and example config.

closes #1907
2017-05-08 11:31:20 -07:00
Daniel Nelson
4200018a0b Enable s390x builds
closes #2766
2017-05-05 14:39:56 -07:00
Daniel Nelson
67cd1669cc Add SLES11 support to rpm package (#2768) 2017-05-05 14:29:40 -07:00
Sébastien
a8cfe03ba8 fix systemd path in order to add compatibility with SuSe (#2499) 2017-05-05 14:04:33 -07:00
ceseuron
e2983383e4 Fixed sqlserver input to work with case-sensitive server collation. (#2749)
Fixed a problem with the sqlserver input where database properties were not returned by Telegraf when SQL Server has been set up with a case-sensitive server-level collation.

* Added bugfix entry to CHANGELOG.md for sqlserver collation input fix.
2017-05-04 10:47:03 -07:00
Daniel Nelson
8cf0dc769b Add 1.4 section to changelog 2017-05-03 17:29:34 -07:00
Daniel Nelson
613de8a80d Remove documentation in kafka_consumer for metric_buffer 2017-05-03 11:51:49 -07:00
Damien Krotkine
f5c890cc1d reflect zookeeper chroot config in readme (#2759) 2017-05-03 11:50:08 -07:00
Daniel Nelson
f7f1eaef65 Return an error if no valid patterns. (#2753) 2017-05-02 14:54:38 -07:00
Alexander Blagoev
188703e204 Improve redis input documentation (#2708) 2017-05-02 11:43:07 -07:00
Patrick Hemmer
52c19af0ba fix close on closed socket_writer (#2748) 2017-05-02 11:06:49 -07:00
Daniel Nelson
5c88965084 Add initial documentation for rabbitmq input. (#2745) 2017-05-01 18:55:48 -07:00
Daniel Nelson
6e76731b7e Don't log error creating database on connect (#2740)
closes #2739
2017-04-28 15:58:46 -07:00
Daniel Nelson
c7a0e40c87 Update telegraf.conf 2017-04-28 13:47:32 -07:00
Daniel Nelson
086a2f5f12 Clarify retention policy option for influxdb output
closes #2696
2017-04-28 13:46:23 -07:00
Daniel Nelson
1da1c4753e Clarify retention policy option for influxdb output
closes #2696
2017-04-28 13:40:58 -07:00
Daniel Nelson
a083e1af7d Use go 1.8.1 for CI and Release builds (#2732) 2017-04-27 16:18:11 -07:00
Daniel Nelson
052e88ad5e Fix grammar 2017-04-27 14:59:18 -07:00
Daniel Nelson
b9ce455bba Update telegraf.conf 2017-04-27 11:53:32 -07:00
Seuf
cd103c85db Added SASL options for output kafka plugin (#2721) 2017-04-27 11:50:25 -07:00
Ross McDonald
a3feacbd2f Kapacitor input plugin (#2031) 2017-04-27 11:47:22 -07:00
Daniel Nelson
e1a734c525 Fix logfile documentation 2017-04-27 11:38:49 -07:00
Daniel Nelson
53ab56de72 Update haproxy README 2017-04-27 11:23:37 -07:00
Seuf
4e2fe598ac Added SSL configuration for input haproxy (#2723) 2017-04-27 11:20:41 -07:00
Daniel Nelson
5fe5c46c6d Fix amqp output block on write if disconnected (#2727)
fixes #2603
2017-04-27 11:10:30 -07:00
Damien Krotkine
153304d92b it's -> its (#2728) 2017-04-27 11:10:00 -07:00
Damien Krotkine
cb9aecbf04 it's -> its (#2729) 2017-04-27 11:06:40 -07:00
Nevins
c66e2896c6 add option to randomize Kinesis partition key (#2705) 2017-04-26 10:54:24 -07:00
Jeff Zellner
9b874dff8d Update README.md (#2719) 2017-04-25 13:17:15 -07:00
Daniel Nelson
b243faa22b Don't close stdout on config reload. (#2707)
fixes #2528
2017-04-24 16:18:58 -07:00
Patrick Hemmer
8f5cd6c2ae add keep-alive support to socket_listener & socket_writer (#2697)
closes #2635
2017-04-24 13:14:42 -07:00
Alexander Blagoev
3c28b93514 Improve procstat input documentation (#2699)
closes #1895
2017-04-24 11:18:55 -07:00
Patrick Hemmer
06baf7cf78 use AddError everywhere (#2372) 2017-04-24 11:13:26 -07:00
Alexander Blagoev
801f6cb8a0 System net input documentation (#2698)
closes #2166
2017-04-24 11:03:53 -07:00
Daniel Nelson
3684ec6315 Update EXAMPLE_README.md 2017-04-21 14:27:36 -07:00
Daniel Nelson
da0773151b Use C locale when running sadf (#2690)
fixes #1911
2017-04-21 10:55:54 -07:00
Daniel Nelson
38e1c1de77 Update commit hash of tail fork 2017-04-20 16:29:39 -07:00
Daniel Nelson
799c8bed29 Add fix for network aliases to changelog
Change was made in gopsutil
2017-04-20 15:34:30 -07:00
Alexander Blagoev
a237301932 Memcached input documentation (#2685)
Closes #2615
2017-04-20 11:25:22 -07:00
Oleg Grytsynevych
b03d78d00f win_perf_counters: Format errors reported by pdh.dll in human-readable format (#2338) 2017-04-20 11:22:44 -07:00
Martin
748ca7d503 Fixed install/remove of telegraf on non-systemd Debian/Ubuntu systems (#2360) 2017-04-20 11:19:33 -07:00
Daniel Nelson
bf30ef89ee Fix ipmi_sensor config is shared between all plugin instances (#2684) 2017-04-19 17:02:44 -07:00
Daniel Nelson
3690e1b9bf Add diskio for darwin to changelog 2017-04-19 13:42:24 -07:00
Patrick Hemmer
2542ef6d62 change jolokia input to use bulk requests (#2253) 2017-04-18 13:00:41 -07:00
Nikolay Denev
eb7ef5392e Simplify system.DiskUsage() (#2630) 2017-04-18 11:42:58 -07:00
Ross McDonald
70b3e763e7 Add input for receiving papertrail webhooks (#2038) 2017-04-17 13:49:36 -07:00
François de Metz
58ee962679 GitHub webhooks: check signature (#2493) 2017-04-17 11:42:03 -07:00
Daniel Nelson
dc5779e2a7 Rename heap_objects_bytes to heap_objects in internal plugin. (#2674)
This field does not contain bytes

fixes #2671
2017-04-14 17:32:14 -07:00
Daniel Nelson
b968759d10 Use variadic disk.IOCounters() function 2017-04-14 13:48:02 -07:00
Daniel Nelson
b90a5b48a1 Improve logparser README (#2664) 2017-04-14 13:47:43 -07:00
calerogers
a12e082dbe Refactor interrupts plugin code (#2670) 2017-04-14 13:40:36 -07:00
calerogers
cadd845b36 Irqstat input plugin (#2494)
closes #2469
2017-04-13 15:53:02 -07:00
ingosus
dff216c44d Feature #1820: add testing without outputs (#2446) 2017-04-13 12:59:28 -07:00
Gregory Kman
45c9b867f6 Update ping-input-plugin Readme (#2651) 2017-04-12 17:46:48 -07:00
Chris Goffinet
9388fff1f7 Fixed content-type header in output plugin OpenTSDB (#2663) 2017-04-12 17:40:10 -07:00
Daniel Nelson
3e0c55bff9 Update grok version (#2662) 2017-04-12 17:10:17 -07:00
Jesús Roncero
49ab4e26f8 Nagios plugin documentation fix (#2659) 2017-04-12 12:04:44 -07:00
Daniel Nelson
360b10c4de Clarify precision documentation (#2655) 2017-04-12 10:42:11 -07:00
Daniel Nelson
2c98e5ae66 Add collectd parser (#2654) 2017-04-12 10:41:26 -07:00
Nick Irvine
0193cbee51 Add max_message_len in kafka_consumer input (#2636) 2017-04-11 12:05:39 -07:00
Daniel Nelson
f55af7d21f Use name filter for IOCounters in diskio (#2649)
Use IOCountersForNames for disk counters.
2017-04-11 11:41:09 -07:00
Patrick Hemmer
516dffa4c4 set default measurement name on snmp input (#2639) 2017-04-10 16:45:02 -07:00
Daniel Nelson
62b5c1f7e7 Add support for precision in http_listener (#2644) 2017-04-10 16:39:40 -07:00
Daniel Nelson
07c428ef89 Use random port in http_listener tests 2017-04-10 14:39:39 -07:00
Vladimir S
aa722fac9b Add dmcache input plugin (#1667) 2017-04-07 15:39:43 -07:00
Rajaseelan Ganeswaran
7cc4ca2341 Add sample config stanza for CPU (#2620) 2017-04-06 14:44:02 -07:00
Victor Yunevich
92fa20cef2 ipmi_sensor: allow @ symbol in password (#2633) 2017-04-06 14:40:34 -07:00
Daniel Nelson
c9f8308f27 Update filtering documentation (#2631) 2017-04-06 12:06:08 -07:00
James
5ffc9fd379 fix postgresql connection leak (#2611) 2017-04-04 17:37:44 -07:00
Daniel Nelson
8bf193dc06 Update httpjson documentation (#2619)
closes  #2536
2017-04-03 18:34:04 -07:00
Patrick Hemmer
f2805fd4aa socket_listener: clean up unix socket file on start & stop (#2618) 2017-04-03 18:06:51 -07:00
Shakeel Sorathia
35e4390168 Docker: optionally add labels as tags (#2425) 2017-04-03 13:43:15 -07:00
Patrick Hemmer
51c99d5b67 add support for linux sysctl fs metrics (#2609) 2017-03-31 14:01:02 -07:00
Daniel Nelson
540f98e228 Fix possible deadlock when output cannot write. (#2610) 2017-03-31 12:45:28 -07:00
Dmitry Ulyanov
c980c92cd5 Added pprof tool (#2512) 2017-03-29 18:28:43 -07:00
Daniel Nelson
9495b615f5 Update changelog for #2587 2017-03-29 17:15:11 -07:00
tjmcs
fb1c7d0154 Adds a new json_timestamp_units configuration parameter (#2587) 2017-03-29 17:12:29 -07:00
Patrick Hemmer
03ee6022f3 fix race in testutil Accumulator.Wait() (#2598) 2017-03-29 17:03:06 -07:00
djjorjinho
cc5b2f68b6 fix timestamp parsing on prometheus plugin (#2596) 2017-03-29 15:04:29 -07:00
Daniel Nelson
2d7f612bd7 Use fork of hpcloud/tail (#2595) 2017-03-29 14:25:33 -07:00
Daniel Nelson
9e036b2d65 Remove wait loop in riemann tests
This testcase still has a race condition, but I believe it occurs only
when the test does not complete quickly enough.
2017-03-28 13:05:10 -07:00
mgresser
1100a98f11 Removed duplicate evictions metric (#2577) 2017-03-28 10:47:00 -07:00
Daniel Nelson
37689f4df6 Add elasticsearch output to changelog 2017-03-28 10:22:28 -07:00
Daniel Nelson
78c7f4e4af Add write timeout to Riemann output (#2576) 2017-03-27 15:49:45 -07:00
Daniel Nelson
84a9f91f5c Skip elasticsearch output integration test in short mode 2017-03-27 15:05:06 -07:00
Daniel Nelson
5612df48f9 Update telegraf.conf 2017-03-27 14:49:04 -07:00
Daniel Nelson
0fa9001453 Clarify influxdb output url format
closes #2568
2017-03-24 16:04:18 -07:00
Patrick Hemmer
995546e7c6 snmp: support table indexes as tags (#2366) 2017-03-24 12:06:52 -07:00
Patrick Hemmer
1402c158b7 remove sleep from tests (#2555) 2017-03-24 12:03:36 -07:00
Oskar
616b66f5cb Multi instances in win_perf_counters (#2352) 2017-03-22 12:04:58 -07:00
Daniel Nelson
70a0a84882 Really fix procstat initialization 2017-03-21 11:40:51 -07:00
Daniel Nelson
5c33c760c7 Fix procstat initialization 2017-03-21 10:59:41 -07:00
Leandro Piccilli
bb28fb256b Add Elasticsearch 5.x output (#2332) 2017-03-20 17:47:57 -07:00
Daniel Nelson
a962e958eb Refactor procstat input (#2540)
fixes #1636 
fixes #2315
2017-03-17 16:49:11 -07:00
Patrick Hemmer
8514acdc3c return error on unsupported serializer data format (#2542) 2017-03-17 10:14:03 -07:00
Antoine Augusti
426182b81a Update default value for Cloudwatch rate limit (#2520) 2017-03-15 15:20:18 -07:00
Daniel Nelson
7a5d857846 Add support for new SSL configuration to mongodb (#2522)
closes #2519
2017-03-10 11:27:55 -08:00
jeremydenoun
13f314a507 Report DEAD (X) State Process (#2501)
Report count of processes in dead (X) process state from the processes input.  This process state is only valid on Linux.
2017-03-09 11:28:54 -08:00
Daniel Nelson
ea6e0b8259 Fix typo in postgresql README 2017-03-09 10:13:31 -08:00
Cameron Sparr
e811e2600d create telegraf.d directory in tarball
closes #2513
2017-03-09 11:41:08 +00:00
Timothy
49c212337f Update CONFIGURATION.md (#2516)
Add information about default configuration file locations.  Also mention that the config directory option is available.
2017-03-09 11:21:03 +00:00
Dennis Dryden
d243d69a09 Add configuration docs to Postgresql input plugin (#2515)
Add configuration docs to the PostgreSQL input plugin README (mostly from the source code), though I've not included the configuration example that seems to use all the connections on the database[1].

[1] https://github.com/influxdata/telegraf/issues/2410

* Fix typo in readme and sampleConfig string.
2017-03-09 11:19:03 +00:00
jeremydenoun
ae6a5d2255 Remove warning if parsing empty content (#2500)
closes #2448
2017-03-08 14:08:55 -08:00
Robpol86
56aa89e5c8 Exporting Ipmi.Path to be set by config. (#2498)
Currently "path" is not exported, giving this error when users try to
override the variable via telegraf.conf as per the sample config:

`field corresponding to `path' is not defined in `*ipmi_sensor.Ipmi'`

Exporting the variable solves the problem.

* Updating changelog.
2017-03-08 16:38:36 +00:00
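The underlying Go rule: config decoders can only set exported struct fields via reflection, so a lowercase field is invisible to telegraf.conf. A sketch (the toml tag is an assumption, not necessarily the plugin's exact declaration):

```go
// Ipmi is the plugin config struct. Only exported fields can be set
// by the TOML decoder; an unexported `path` cannot be bound, which is
// what produced the "field corresponding to `path' is not defined"
// error above.
type Ipmi struct {
	Path string `toml:"path"` // exported: settable from telegraf.conf
}
```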
vvvkamper
7513fcac4e Fix part 2 of #1291
added PDH_FMT_NOCAP100 format option

closes #2483
2017-03-08 13:39:03 +00:00
Cameron Sparr
9df2974a0f update gopsutil for file close fixes
hopefully this will fix #2472
2017-03-08 12:54:17 +00:00
Daniel Nelson
ceb36adac7 Update issue template 2017-03-06 11:20:53 -08:00
Cameron Sparr
7a8e821731 Revert "Procstat: don't cache PIDs" (#2479) 2017-03-06 15:59:36 +00:00
François de Metz
76bcdecd21 Respond 200 when receiving a ping event. (#2492) 2017-03-06 12:34:41 +00:00
Jack Zampolin
10744646db AMQP Consumer plugin (#1678) 2017-03-03 10:24:50 -08:00
Charles-Henri
1873abd248 Iptables input: document better the ignored rules behavior (#2482)
During issue #2215 it was highlighted that the current behavior, where
rules without a comment are ignored, is confusing for several users.

This commit improves the documentation and adds a NOTE to the sample
config to clarify the behavior for new users.
2017-03-02 09:58:26 +00:00
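For context, the comment the plugin keys on is the standard iptables comment match; a rule without one is skipped (rule details are illustrative):

```sh
# Measured: has a comment, so its counters are reported under "ssh_in".
iptables -A INPUT -p tcp --dport 22 -m comment --comment "ssh_in" -j ACCEPT

# Ignored: no comment, so the iptables input skips this rule.
iptables -A INPUT -p tcp --dport 80 -j ACCEPT
```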
Chris Koehnke
9618515926 Disk counter array newline (#2481)
Tweak formatting of `LogicalDisk` counter array to have one entry per
line.
2017-03-02 08:43:33 +00:00
Cameron Sparr
a251adb838 Fix type conflict on windows ping plugin (#2462)
closes #1433
2017-03-01 11:22:42 +00:00
Cameron Sparr
9e810ac463 Handle nil os.FileInfo in filepath.Walk
closes #2466
2017-02-28 17:51:03 +00:00
Cameron Sparr
b9457a1092 log error message when invalid regex is used
closes #2178
2017-02-28 12:48:14 +00:00
Cameron Sparr
6f2eeae498 Remove sleep from riemann test 2017-02-28 12:46:27 +00:00
Cameron Sparr
42a41d33cc add cgroup plugin to README 2017-02-24 09:43:22 +00:00
Cameron Sparr
81408f9da7 switch out deprecated docker client library
closes #2071
2017-02-22 10:55:00 +00:00
Rickard von Essen
c4212d69c9 Updated readme, now requires Go 1.8 (#2455) 2017-02-21 22:13:22 +01:00
Carlos
e17164d3f0 Added default config to file output plugin's README (#2426) 2017-02-20 11:50:39 +01:00
Cameron Sparr
e5349393f8 Check for errors in user stats & process list
closes #2414
2017-02-17 15:38:33 +00:00
Cameron Sparr
06176ef410 Only set the buffer size once
fixes #2380
2017-02-17 14:11:15 +00:00
Cameron Sparr
2a3448c8f3 socket_writer output plugin README 2017-02-16 23:13:14 +00:00
Leandro Piccilli
5da40d56ad Check if tag value is empty before allocation
closes #2390
closes #2404
2017-02-16 23:07:27 +00:00
Cameron Sparr
54c9a385d5 Fix prometheus_client reload behavior
fixes #2282
2017-02-16 21:57:13 +00:00
Priyank Trivedi
25c55419df Fix typo - Default from Defalt (#2417) 2017-02-16 19:03:17 +00:00
Yaron de Leeuw
c19fb1535e README: update golang requirement to 1.7 (#2412)
The docker engine-api package we use needs golang 1.7+, see:
https://github.com/docker/engine-api/pull/382#issuecomment-244512952

So telegraf won't compile without 1.7
2017-02-15 17:17:26 +00:00
François de Metz
45a168e425 Fix setting the username and the password to the influxdb output. (#2401) 2017-02-13 15:30:30 +00:00
Cameron Sparr
22243a8354 Skip service input plugins in test mode 2017-02-13 10:40:38 +00:00
Cameron Sparr
ff9369f1a1 prepend 'inputs.' to --test output check 2017-02-13 10:33:51 +00:00
Cameron Sparr
21cf79163c don't use influxdata/config, just use influxdata/toml 2017-02-10 17:27:18 +00:00
Cameron Sparr
f05fac74cb update naoina/toml to do config validation 2017-02-10 17:05:13 +00:00
Cameron Sparr
c8cc01ba6a deprecate udp_listener & tcp_listener 2017-02-06 10:41:44 +00:00
Cameron Sparr
694955c87b Remove metric.Point from metric interface 2017-02-03 16:53:07 +00:00
Cosmo Petrich
b1945c0493 Increment gather_errors for all input errors
closes #2339
2017-02-03 11:22:31 +00:00
Cameron Sparr
1c4673e900 changelog update 2017-02-03 10:04:50 +00:00
Nick Irvine
dfb4038654 Remove pidfile if pidfile was created (#2358)
Also, ensure pidfile perms are 644
2017-02-03 10:02:19 +00:00
Patrick Hemmer
b3537ef2a8 add socket listener & writer (#2094)
closes #1516 
closes #1711 
closes #1721 
closes #1526
2017-02-02 16:24:03 +00:00
Yaron de Leeuw
0ce44648cf Procstat: don't cache PIDs (#2206)
Changed the procstat input plugin to not cache PIDs. Solves #1636.
The logic of creating a process by pid was moved from `procstat.go` to
`spec_processor.go`.

* Procstat: go fmt

* procstat: modify changelog for #2206
2017-02-02 14:12:22 +00:00
Patrick Hemmer
55d3f70771 add missing fields to haproxy input (#2323) 2017-02-02 13:46:53 +00:00
Matteo Cerutti
a610f8bd03 allow querying sensors via the open interface
closes #2244
closes #1547
2017-02-02 13:31:04 +00:00
Cameron Sparr
dfba3ff37a fix telegraf swallowing panics in --test mode
This defer function was causing telegraf to call os.Exit(0) instead of
panicking when it was supposed to.

closes #2341
2017-02-02 12:14:35 +00:00
Cameron Sparr
285be648c4 Godeps update
closes #2356
2017-02-02 09:52:06 +00:00
Cameron Sparr
f7d551a807 Add more nested globpath tests 2017-02-01 23:44:35 +00:00
Nathan Haugo
3f224a15d5 Update readme to link to k8s plugin (#2355) 2017-02-01 21:23:45 +00:00
Jérôme Vizcaino
c0bbde03ea Ceph: represent pgmap states using tags (#2229)
* ceph: maps are already refs, no need to use a pointer

* ceph: pgmap_states are represented in a single metric "count", differentiated by tag

* Update CHANGELOG
2017-02-01 14:47:23 +00:00
Cameron Sparr
97050e9669 changelog update 2017-02-01 14:41:58 +00:00
James Gregory
eafd1dcc7c Kubernetes input: Handle null startTime for stopped pods (#2335) 2017-02-01 14:41:04 +00:00
Cameron Sparr
c528c53e5b iptables changelog update 2017-02-01 14:39:16 +00:00
ldep30
07a6223932 Add lock option to the IPtables input plugin (#2201)
* Update README.md

* Add lock support to the IPtables input plugin

* Update iptables.go

Doc cleaning
2017-02-01 14:37:18 +00:00
Cameron Sparr
aeb849d744 changelog fix 2017-02-01 14:22:31 +00:00
Len Smith
9003efc3fa http_response: Add in support for looking for substring in response (#2204)
* Add note to CHANGELOG.md

* Switch from substring match to regex match

* Requested code changes

* Make requested changes and refactor to avoid nested if-else.

* Convert tabs to space and compile regex once
2017-02-01 14:21:08 +00:00
Pierre Fersing
32e06a489d Keep -config-directory when running as Windows service (#2330)
* Update changelog
2017-02-01 14:12:35 +00:00
njwhite
2932db8480 Make Logparser Plugin Check For New Files (#2141)
Check in the Gather method whether any new files matching the glob
have appeared. If so, start tailing them from the beginning.

* changelog update for #2141
2017-02-01 14:11:39 +00:00
Cameron Sparr
19dee32287 Go 1.7.5 update cherry-picked to 1.2.1 release 2017-02-01 10:11:16 +00:00
Cameron Sparr
4dad723088 Changelog update 2017-02-01 10:07:31 +00:00
Cameron Sparr
54cfbb5b87 metric: Fix negative number handling
closes #2324
2017-02-01 10:07:31 +00:00
Martin
3e37dda7b0 Go version 1.7.4 -> 1.7.5 (#2348) 2017-02-01 10:07:02 +00:00
Cameron Sparr
fb7931591d Changelog update 2017-02-01 08:59:54 +00:00
Cameron Sparr
e87ce22af9 running output: Drop nil metrics
fixes #2317
2017-02-01 08:55:22 +00:00
John Engelman
738cbbdbb6 Add numerical representation of Consul health check state. (#2277) 2017-01-28 16:47:25 -08:00
Patrick Hemmer
074e6d177c add support for diskio name templates & udev tags
closes #1453
closes #1386
closes #1428
2017-01-27 16:15:42 -08:00
Cameron Sparr
1d864ebd40 Fix riemann output unit tests 2017-01-27 15:08:21 -08:00
Cameron Sparr
e9decadf75 Riemann rewrite changelog update 2017-01-27 14:59:35 -08:00
Fabio Berchtold
3fa37a9212 Rewriting Riemann output plugin (#1900)
* rename to riemann_legacy

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* initial draft for Riemann output plugin rewrite

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* add unit tests

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* add option to send string metrics as states

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* add integration tests

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* add plugin README.md

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* bump riemann library

* clarify settings description

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* update Readme.md with updated description

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* add Riemann event examples

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* use full URL for Riemann server address

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

closes #1878
2017-01-27 14:54:59 -08:00
Cameron Sparr
c9e87a39f8 Revert using fasthttp library to net/http 2017-01-25 16:42:23 -08:00
Cameron Sparr
4a5d313693 Improve the InfluxDB throughput performance
This changes the current use of the InfluxDB client to instead use a
baked-in client that uses the fasthttp library.

This allows for significantly smaller allocations, the re-use of http
body buffers, and the re-use of the actual bytes of the line-protocol
metric representations.
2017-01-25 11:54:16 -08:00
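A sketch of the pooling pattern fasthttp enables (illustrative only; per the commit above this, the baked-in client was later reverted to net/http):

```go
import "github.com/valyala/fasthttp"

// post reuses pooled request/response objects, so repeated writes
// avoid per-call allocations of headers and body buffers.
func post(client *fasthttp.Client, url string, body []byte) error {
	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseRequest(req)   // return buffers to the pool
	defer fasthttp.ReleaseResponse(resp) // instead of reallocating

	req.Header.SetMethod("POST")
	req.SetRequestURI(url)
	req.SetBody(body)
	return client.Do(req, resp)
}
```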
Cameron Sparr
168270ea5f ntpq: correct number of seconds in an hour
closes #2256
2017-01-24 15:27:44 -08:00
Jonas Hahnfeld
c4d4185fb5 snmp: Allow lines with empty or missing tags (#2172)
The changes in #1848 resulted in lines being dropped if they had an empty
tag. Let's allow all lines that have empty or missing tags!
2017-01-24 14:57:43 -08:00
Kali Hernandez
822333690f Debian package: check for group before useradd (#2107)
Fixes #2106
2017-01-24 14:54:19 -08:00
Will Pearson
d7a8bb2214 Fix problem with graphite talking to closed connections (#2171)
We were having problems with telegraf talking to carbon-relay-ng using
the graphite output. When the carbon-relay-ng server restarted the
connection, the telegraf side would go into CLOSE_WAIT but telegraf
would continue to send statistics through the connection.

Reading around, it seems you need to do a read on the connection and
see an EOF error. We've implemented this and added a test that
replicates roughly the error we were having.

Pair: @whpearson @joshmyers
2017-01-24 12:50:29 -08:00
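The read-for-EOF check described above, as a minimal Go sketch (hypothetical helper; reading is safe here because a graphite server never sends data back):

```go
import (
	"io"
	"net"
	"time"
)

// connClosed probes a graphite connection with a short, deadline-bound
// read. EOF means the peer closed its side (our end is in CLOSE_WAIT)
// and we must reconnect before writing; a timeout means it is healthy.
func connClosed(conn net.Conn) bool {
	conn.SetReadDeadline(time.Now().Add(time.Millisecond))
	var buf [1]byte
	_, err := conn.Read(buf[:])
	conn.SetReadDeadline(time.Time{}) // clear the deadline again
	return err == io.EOF
}
```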
Pierre Fersing
a505123e60 Improve win_perf_counters on non English systems (#2261) 2017-01-24 12:46:06 -08:00
Pierre Fersing
be10b19760 Added more Windows metrics (#2290)
Signed-off-by: Pierre Fersing <pierre.fersing@bleemeo.com>
2017-01-24 12:38:10 -08:00
James
b9ae3d6a57 fix postgresql 'name' and 'oid' data types by switching to a driver (#1750)
that handles them properly
2017-01-24 12:36:36 -08:00
Cameron Sparr
c882570983 32-bit binary for windows and freebsd
closes #1346
closes #2218
2017-01-23 20:28:13 -08:00
Cameron Sparr
80411f99f0 influxdb output: treat field type conflicts as a successful write
If we write a batch of points and get a "field type conflict" error
message in return, we should drop the entire batch of points because
this indicates that one or more points have a type that doesn't match
the database.

These errors will never go away on their own, and InfluxDB will
successfully write the points that don't have a conflict.

closes #2245
2017-01-23 16:41:29 -08:00
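A hedged sketch of the classification (not the output's exact code): a field type conflict is permanent, so the batch is counted as written instead of being retried forever.

```go
import "strings"

// shouldDropBatch treats a "field type conflict" response as a
// permanent error: retrying can never succeed, and InfluxDB has
// already written the non-conflicting points in the batch.
func shouldDropBatch(err error) bool {
	return err != nil && strings.Contains(err.Error(), "field type conflict")
}
```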
Cameron Sparr
6df3f0fdae Run scheduled flushes in background
Doing this unblocks incoming metrics while waiting for a flush to take
place.

We have to create a semaphore so that we can 'skip' flushes that try
to run while a flush is already running.

closes #2262
2017-01-23 14:41:40 -08:00
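The 'skip while running' semaphore is the classic one-slot buffered channel with a non-blocking send; a minimal sketch of the pattern, with hypothetical names:

```go
// flushSlot is a one-slot semaphore guarding background flushes.
var flushSlot = make(chan struct{}, 1)

// tryFlush starts flush() in the background if no flush is running;
// otherwise it skips, so scheduled flushes never queue up behind a
// slow write.
func tryFlush(flush func()) bool {
	select {
	case flushSlot <- struct{}{}: // acquired the slot
		go func() {
			defer func() { <-flushSlot }() // release when done
			flush()
		}()
		return true
	default:
		return false // a flush is already in progress; skip this tick
	}
}
```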
Cameron Sparr
22340ad984 Add newline to influx line-protocol if not present
closes #2297
2017-01-23 13:52:20 -08:00
Cameron Sparr
c15504c509 opentsdb: add tcp:// prefix if not present
closes #2299
2017-01-23 13:45:16 -08:00
Claudius Zingerli
20bf90ee52 Add minimal documentation to the diskio plugin (#2296)
* Add documentation to diskio plugin

* Update spelling, fix iops_in_progress unit
2017-01-21 15:08:17 -08:00
Cameron Sparr
3de6bfbcb8 Direct people to downloads page for installation 2017-01-13 17:02:10 +00:00
Cameron Sparr
e0c6262e0b mysql build fixup and changelog update 2017-01-13 14:44:28 +00:00
Pierre Fersing
9b2f6499e7 Added more InnoDB metric to MySQL plugin (#2179) 2017-01-13 14:28:56 +00:00
Cameron Sparr
9262712f0a Changelog update and go fmt 2017-01-13 14:27:20 +00:00
acezellponce
0c9da0985a Added userstats to mysql input plugin (#2137)
* Added GatherUserStatistics, row Uptime in gatherGlobalStatuses, and version fields & tags

* Updated README file

* pulling in latest from master

* ran go fmt to fix formatting

* fix unreachable code

* few fixes

* cleaning up and applying suggestions from sparrc
2017-01-13 14:25:25 +00:00
Viet Hung Nguyen
b89c45b858 Ignore devfs on OSX (#2232) 2017-01-13 14:19:57 +00:00
Cameron Sparr
b60b360f13 Changelog update 2017-01-13 13:50:07 +00:00
Kebus1
734988d732 Fixed Bug 2077 SQL Server (#2212) 2017-01-13 13:47:47 +00:00
627 changed files with 87258 additions and 12972 deletions

.circleci/config.yml (new file)

@@ -0,0 +1,106 @@
---
defaults:
  defaults: &defaults
    working_directory: '/go/src/github.com/influxdata/telegraf'
  go-1_8: &go-1_8
    docker:
      - image: 'circleci/golang:1.8.7'
  go-1_9: &go-1_9
    docker:
      - image: 'circleci/golang:1.9.5'
  go-1_10: &go-1_10
    docker:
      - image: 'circleci/golang:1.10.1'
version: 2
jobs:
  deps:
    <<: [ *defaults, *go-1_10 ]
    steps:
      - checkout
      - run: 'make deps'
      - persist_to_workspace:
          root: '/go/src'
          paths:
            - '*'
  test-go-1.8:
    <<: [ *defaults, *go-1_8 ]
    steps:
      - attach_workspace:
          at: '/go/src'
      - run: 'make test-ci'
  test-go-1.9:
    <<: [ *defaults, *go-1_9 ]
    steps:
      - attach_workspace:
          at: '/go/src'
      - run: 'make test-ci'
  test-go-1.10:
    <<: [ *defaults, *go-1_10 ]
    steps:
      - attach_workspace:
          at: '/go/src'
      - run: 'make test-ci'
      - run: 'GOARCH=386 make test-ci'
  release:
    <<: [ *defaults, *go-1_10 ]
    steps:
      - attach_workspace:
          at: '/go/src'
      - run: './scripts/release.sh'
      - store_artifacts:
          path: './artifacts'
          destination: '.'
  nightly:
    <<: [ *defaults, *go-1_10 ]
    steps:
      - attach_workspace:
          at: '/go/src'
      - run: './scripts/release.sh'
      - store_artifacts:
          path: './artifacts'
          destination: '.'
workflows:
  version: 2
  build_and_release:
    jobs:
      - 'deps'
      - 'test-go-1.8':
          requires:
            - 'deps'
      - 'test-go-1.9':
          requires:
            - 'deps'
      - 'test-go-1.10':
          requires:
            - 'deps'
      - 'release':
          requires:
            - 'test-go-1.8'
            - 'test-go-1.9'
            - 'test-go-1.10'
  nightly:
    jobs:
      - 'deps'
      - 'test-go-1.8':
          requires:
            - 'deps'
      - 'test-go-1.9':
          requires:
            - 'deps'
      - 'test-go-1.10':
          requires:
            - 'deps'
      - 'nightly':
          requires:
            - 'test-go-1.8'
            - 'test-go-1.9'
            - 'test-go-1.10'
    triggers:
      - schedule:
          cron: "0 7 * * *"
          filters:
            branches:
              only:
                - master


@@ -1,7 +1,7 @@
## Directions
GitHub Issues are reserved for actionable bug reports and feature requests.
General questions should be sent to the [InfluxDB mailing list](https://groups.google.com/forum/#!forum/influxdb).
General questions should be asked at the [InfluxData Community](https://community.influxdata.com) site.
Before opening an issue, search for similar bug reports or feature requests on GitHub Issues.
If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below.

.github/PULL_REQUEST_TEMPLATE.md

@@ -1,5 +1,5 @@
### Required for all PRs:
- [ ] CHANGELOG.md updated (we recommend not updating this until the PR has been approved by a maintainer)
- [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed)
- [ ] README.md updated (if adding a new plugin)
- [ ] Signed [CLA](https://influxdata.com/community/cla/).
- [ ] Associated README.md updated.
- [ ] Has appropriate unit tests.

.gitignore

@@ -1,7 +1,4 @@
build
tivan
.vagrant
/build
/telegraf
.idea
*~
*#
/telegraf.exe
/telegraf.gz

CHANGELOG.md

@@ -1,4 +1,661 @@
## v1.2 [unreleased]
## v1.7 [unreleased]
### Release Notes
- The `cassandra` input plugin has been deprecated in favor of the `jolokia2`
input plugin which is much more configurable and more performant. There is
an [example configuration](./plugins/inputs/jolokia2/examples) to help you
get started.
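  For reference, a minimal `jolokia2_agent` sketch follows; the agent URL and
  MBean below are placeholders, not taken from the linked examples:
```toml
[[inputs.jolokia2_agent]]
  # Placeholder Jolokia agent endpoint
  urls = ["http://localhost:8778/jolokia"]

  [[inputs.jolokia2_agent.metric]]
    name  = "java_runtime"
    mbean = "java.lang:type=Runtime"
    paths = ["Uptime"]
```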
### New Inputs
- [fibaro](./plugins/inputs/fibaro/README.md) - Contributed by @dynek
- [mcrouter](./plugins/inputs/mcrouter/README.md) - Contributed by @cthayer
- [nvidia_smi](./plugins/inputs/nvidia_smi/README.md) - Contributed by @jackzampolin
### Features
- [#3964](https://github.com/influxdata/telegraf/pull/3964): Add repl_oplog_window_sec metric to mongodb input.
- [#3819](https://github.com/influxdata/telegraf/pull/3819): Add per-host shard metrics in mongodb input.
- [#3999](https://github.com/influxdata/telegraf/pull/3999): Skip files with leading `..` in config directory.
- [#4021](https://github.com/influxdata/telegraf/pull/4021): Add TLS support to socket_writer and socket_listener plugins.
- [#4025](https://github.com/influxdata/telegraf/pull/4025): Add snmp input option to strip non-fixed-length index suffixes.
- [#4035](https://github.com/influxdata/telegraf/pull/4035): Add server version tag to docker input.
- [#4044](https://github.com/influxdata/telegraf/pull/4044): Add support for LeoFS 1.4 to leofs input.
- [#4068](https://github.com/influxdata/telegraf/pull/4068): Add parameter to force the interval of gather for sysstat.
- [#3877](https://github.com/influxdata/telegraf/pull/3877): Support busybox ping in the ping input.
- [#4077](https://github.com/influxdata/telegraf/pull/4077): Add input plugin for McRouter.
### Bugfixes
- [#4018](https://github.com/influxdata/telegraf/pull/4018): Write to working file outputs if any files are not writeable.
- [#4036](https://github.com/influxdata/telegraf/pull/4036): Add all win_perf_counters fields for a series in a single metric.
## v1.6.2 [unreleased]
### Bugfixes
- [#4078](https://github.com/influxdata/telegraf/pull/4078): Use same timestamp for fields in system input.
## v1.6.1 [2018-04-23]
### Bugfixes
- [#3835](https://github.com/influxdata/telegraf/issues/3835): Report mem input fields as gauges instead of counters.
- [#4030](https://github.com/influxdata/telegraf/issues/4030): Fix graphite output writing unsigned integers in the wrong format.
- [#4043](https://github.com/influxdata/telegraf/issues/4043): Report available fields if utmp is unreadable.
- [#4039](https://github.com/influxdata/telegraf/issues/4039): Fix potential "no fields" error writing to outputs.
- [#4037](https://github.com/influxdata/telegraf/issues/4037): Fix uptime reporting in system input when run inside docker.
- [#3750](https://github.com/influxdata/telegraf/issues/3750): Fix mem input "cannot allocate memory" error on FreeBSD-based systems.
- [#4056](https://github.com/influxdata/telegraf/pull/4056): Fix duplicate tags when overriding an existing tag.
- [#4062](https://github.com/influxdata/telegraf/pull/4062): Add server argument as first argument in unbound input.
- [#4063](https://github.com/influxdata/telegraf/issues/4063): Fix handling of floats with multiple leading zeroes.
- [#4064](https://github.com/influxdata/telegraf/issues/4064): Return errors in mongodb SSL/TLS configuration.
## v1.6 [2018-04-16]
### Release Notes
- The `mysql` input plugin has been updated to fix a number of type conversion
issues. This may cause a `field type error` when inserting into InfluxDB due
to the change of types.
To address this we have introduced a new `metric_version` option to control
enabling the new format (see the combined sketch after these notes). For
in-depth recommendations on upgrading please reference the [mysql plugin documentation](./plugins/inputs/mysql/README.md#metric-version).
It is encouraged to migrate to the new model when possible, as the old version
is deprecated and will be removed in a future version.
- The `postgresql` plugins now default to using a persistent connection to the database.
In environments where TCP connections are terminated, the `max_lifetime`
setting should be set to less than the collection `interval` to prevent errors.
- The `sqlserver` input plugin has a new query and data model that can be enabled
by setting `query_version = 2`. It is encouraged to migrate to the new
model when possible as the old version is deprecated and will be removed in
a future version.
- An option has been added to the `openldap` input plugin that reverses the
metric name to improve grouping. This change is enabled when `reverse_metric_names = true`
is set. It is encouraged to enable this option when possible, as the old
ordering is deprecated.
- The new `http` input configured with `data_format = "json"` can perform the
same task as the now-deprecated `httpjson` input.
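Taken together, the options named in these notes could be opted into with a
configuration along these lines; a hedged sketch, in which all server
addresses and URLs are placeholders:
```toml
[[inputs.mysql]]
  servers = ["tcp(127.0.0.1:3306)/"]       # placeholder address
  metric_version = 2                       # opt in to the new, type-consistent format

[[inputs.postgresql]]
  address = "host=localhost user=postgres sslmode=disable"  # placeholder
  max_lifetime = "5s"                      # keep below the collection interval

[[inputs.sqlserver]]
  query_version = 2                        # enable the new query and data model

[[inputs.openldap]]
  reverse_metric_names = true              # reversed names group related metrics

[[inputs.http]]
  urls = ["http://localhost:8080/stats"]   # placeholder endpoint
  data_format = "json"                     # same role as the deprecated httpjson
```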
### New Inputs
- [http](./plugins/inputs/http/README.md) - Thanks to @grange74
- [ipset](./plugins/inputs/ipset/README.md) - Thanks to @sajoupa
- [nats](./plugins/inputs/nats/README.md) - Thanks to @mjs & @levex
### New Processors
- [override](./plugins/processors/override/README.md) - Thanks to @KarstenSchnitter
### New Parsers
- [dropwizard](./docs/DATA_FORMATS_INPUT.md#dropwizard) - Thanks to @atzoum
### Features
- [#3551](https://github.com/influxdata/telegraf/pull/3551): Add health status mapping from string to int in elasticsearch input.
- [#3580](https://github.com/influxdata/telegraf/pull/3580): Add control over which stats to gather in basicstats aggregator.
- [#3596](https://github.com/influxdata/telegraf/pull/3596): Add messages_delivered_get to rabbitmq input.
- [#3632](https://github.com/influxdata/telegraf/pull/3632): Add wired field to mem input.
- [#3619](https://github.com/influxdata/telegraf/pull/3619): Add support for gathering exchange metrics to the rabbitmq input.
- [#3565](https://github.com/influxdata/telegraf/pull/3565): Add support for additional metrics on Linux in zfs input.
- [#3524](https://github.com/influxdata/telegraf/pull/3524): Add available_entropy field to kernel input plugin.
- [#3643](https://github.com/influxdata/telegraf/pull/3643): Add user privilege level setting to IPMI sensors.
- [#2701](https://github.com/influxdata/telegraf/pull/2701): Use persistent connection to postgresql database.
- [#2846](https://github.com/influxdata/telegraf/pull/2846): Add support for dropwizard input format.
- [#3666](https://github.com/influxdata/telegraf/pull/3666): Add container health metrics to docker input.
- [#3687](https://github.com/influxdata/telegraf/pull/3687): Add support for using globs in devices list of diskio input plugin.
- [#2754](https://github.com/influxdata/telegraf/pull/2754): Allow running as console application on Windows.
- [#3703](https://github.com/influxdata/telegraf/pull/3703): Add listener counts and node running status to rabbitmq input.
- [#3674](https://github.com/influxdata/telegraf/pull/3674): Add NATS Monitoring Input Plugin.
- [#3702](https://github.com/influxdata/telegraf/pull/3702): Add ability to select which queues will be gathered in rabbitmq input.
- [#3726](https://github.com/influxdata/telegraf/pull/3726): Add support for setting BSD source address to the ping input.
- [#3346](https://github.com/influxdata/telegraf/pull/3346): Add Ipset input plugin.
- [#3719](https://github.com/influxdata/telegraf/pull/3719): Add TLS and HTTP basic auth to prometheus_client output.
- [#3618](https://github.com/influxdata/telegraf/pull/3618): Add new sqlserver output data model.
- [#3559](https://github.com/influxdata/telegraf/pull/3559): Add native Go method for finding pids to procstat.
- [#3722](https://github.com/influxdata/telegraf/pull/3722): Add additional metrics and reverse metric names option to openldap.
- [#3769](https://github.com/influxdata/telegraf/pull/3769): Add TLS support to the mesos input plugin.
- [#3546](https://github.com/influxdata/telegraf/pull/3546): Add http input plugin.
- [#3781](https://github.com/influxdata/telegraf/pull/3781): Add keep alive support to the TCP mode of statsd.
- [#3783](https://github.com/influxdata/telegraf/pull/3783): Support deadline in ping plugin.
- [#3765](https://github.com/influxdata/telegraf/pull/3765): Add option to disable labels in prometheus output for string fields.
- [#3808](https://github.com/influxdata/telegraf/pull/3808): Add shard server stats to the mongodb input plugin.
- [#3713](https://github.com/influxdata/telegraf/pull/3713): Add server option to unbound plugin.
- [#3804](https://github.com/influxdata/telegraf/pull/3804): Convert boolean metric values to float in datadog output.
- [#3799](https://github.com/influxdata/telegraf/pull/3799): Add Solr 3 compatibility.
- [#3797](https://github.com/influxdata/telegraf/pull/3797): Add sum stat to basicstats aggregator.
- [#3626](https://github.com/influxdata/telegraf/pull/3626): Add ability to override proxy from environment in http response.
- [#3853](https://github.com/influxdata/telegraf/pull/3853): Add host to ping timeout log message.
- [#3773](https://github.com/influxdata/telegraf/pull/3773): Add override processor.
- [#3814](https://github.com/influxdata/telegraf/pull/3814): Add status_code and result tags and result_type field to http_response input.
- [#3880](https://github.com/influxdata/telegraf/pull/3880): Added config flag to skip collection of network protocol metrics.
- [#3927](https://github.com/influxdata/telegraf/pull/3927): Add TLS support to kapacitor input.
- [#3496](https://github.com/influxdata/telegraf/pull/3496): Add HTTP basic auth support to the http_listener input.
- [#3452](https://github.com/influxdata/telegraf/issues/3452): Tags in output InfluxDB Line Protocol are now sorted.
- [#3631](https://github.com/influxdata/telegraf/issues/3631): InfluxDB Line Protocol parser now accepts DOS line endings.
- [#2496](https://github.com/influxdata/telegraf/issues/2496): An option has been added to skip database creation in the InfluxDB output.
- [#3366](https://github.com/influxdata/telegraf/issues/3366): Add support for connecting to InfluxDB over a unix domain socket.
- [#3946](https://github.com/influxdata/telegraf/pull/3946): Add optional unsigned integer support to the influx data format.
- [#3811](https://github.com/influxdata/telegraf/pull/3811): Add TLS support to zookeeper input.
- [#2737](https://github.com/influxdata/telegraf/issues/2737): Add filters for container state to docker input.
### Bugfixes
- [#1896](https://github.com/influxdata/telegraf/issues/1896): Fix various mysql data type conversions.
- [#3810](https://github.com/influxdata/telegraf/issues/3810): Fix metric buffer limit in internal plugin after reload.
- [#3801](https://github.com/influxdata/telegraf/issues/3801): Fix panic in http_response on invalid regex.
- [#3873](https://github.com/influxdata/telegraf/issues/3873): Fix socket_listener setting ReadBufferSize on tcp sockets.
- [#1575](https://github.com/influxdata/telegraf/issues/1575): Add tag for target url to phpfpm input.
- [#3868](https://github.com/influxdata/telegraf/issues/3868): Fix cannot unmarshal object error in DC/OS input.
- [#3648](https://github.com/influxdata/telegraf/issues/3648): Fix InfluxDB output not able to reconnect when server address changes.
- [#3957](https://github.com/influxdata/telegraf/issues/3957): Fix parsing of dos line endings in the smart input.
- [#3754](https://github.com/influxdata/telegraf/issues/3754): Fix precision truncation when no timestamp included.
- [#3655](https://github.com/influxdata/telegraf/issues/3655): Fix SNMPv3 connection with Cisco ASA 5515 in snmp input.
- [#3981](https://github.com/influxdata/telegraf/pull/3981): Export all vars defined in /etc/default/telegraf.
- [#4004](https://github.com/influxdata/telegraf/issues/4004): Allow grok pattern to contain newlines.
## v1.5.3 [2018-03-14]
### Bugfixes
- [#3729](https://github.com/influxdata/telegraf/issues/3729): Set path to / if HOST_MOUNT_PREFIX matches full path.
- [#3739](https://github.com/influxdata/telegraf/issues/3739): Remove userinfo from url tag in prometheus input.
- [#3778](https://github.com/influxdata/telegraf/issues/3778): Fix ping plugin not reporting zero durations.
- [#3697](https://github.com/influxdata/telegraf/issues/3697): Disable keepalive in mqtt output to prevent deadlock.
- [#3786](https://github.com/influxdata/telegraf/pull/3786): Fix collation difference in sqlserver input.
- [#3871](https://github.com/influxdata/telegraf/pull/3871): Fix uptime metric in passenger input plugin.
- [#3851](https://github.com/influxdata/telegraf/issues/3851): Add output of stderr in case of error to exec log message.
## v1.5.2 [2018-01-30]
### Bugfixes
- [#3684](https://github.com/influxdata/telegraf/pull/3684): Ignore empty lines in Graphite plaintext.
- [#3604](https://github.com/influxdata/telegraf/issues/3604): Fix index out of bounds error in solr input plugin.
- [#3680](https://github.com/influxdata/telegraf/pull/3680): Reconnect before sending graphite metrics if disconnected.
- [#3693](https://github.com/influxdata/telegraf/pull/3693): Align aggregator period with internal ticker to avoid skipping metrics.
- [#3629](https://github.com/influxdata/telegraf/issues/3629): Fix a potential deadlock when using aggregators.
- [#3697](https://github.com/influxdata/telegraf/issues/3697): Limit wait time for writes in mqtt output.
- [#3698](https://github.com/influxdata/telegraf/issues/3698): Revert change in graphite output where dot in field key was replaced by underscore.
- [#3710](https://github.com/influxdata/telegraf/issues/3710): Add timeout to wavefront output write.
- [#3725](https://github.com/influxdata/telegraf/issues/3725): Exclude master_replid fields from redis input.
## v1.5.1 [2018-01-10]
### Bugfixes
- [#3624](https://github.com/influxdata/telegraf/pull/3624): Fix name error in jolokia2_agent sample config.
- [#3625](https://github.com/influxdata/telegraf/pull/3625): Fix DC/OS login expiration time.
- [#3593](https://github.com/influxdata/telegraf/pull/3593): Set Content-Type charset in influxdb output and allow it be overridden.
- [#3594](https://github.com/influxdata/telegraf/pull/3594): Document permissions setup for postfix input.
- [#3633](https://github.com/influxdata/telegraf/pull/3633): Fix deliver_get field in rabbitmq input.
- [#3607](https://github.com/influxdata/telegraf/issues/3607): Escape environment variables during config toml parsing.
## v1.5 [2017-12-14]
### New Plugins
- [basicstats](./plugins/aggregators/basicstats/README.md) - Thanks to @toni-moreno
- [bond](./plugins/inputs/bond/README.md) - Thanks to @ildarsv
- [cratedb](./plugins/outputs/cratedb/README.md) - Thanks to @felixge
- [dcos](./plugins/inputs/dcos/README.md) - Thanks to @influxdata
- [jolokia2](./plugins/inputs/jolokia2/README.md) - Thanks to @dylanmei
- [nginx_plus](./plugins/inputs/nginx_plus/README.md) - Thanks to @mplonka & @poblahblahblah
- [opensmtpd](./plugins/inputs/opensmtpd/README.md) - Thanks to @aromeyer
- [particle](./plugins/inputs/webhooks/particle/README.md) - Thanks to @davidgs
- [pf](./plugins/inputs/pf/README.md) - Thanks to @nferch
- [postfix](./plugins/inputs/postfix/README.md) - Thanks to @phemmer
- [smart](./plugins/inputs/smart/README.md) - Thanks to @rickard-von-essen
- [solr](./plugins/inputs/solr/README.md) - Thanks to @ljagiello
- [teamspeak](./plugins/inputs/teamspeak/README.md) - Thanks to @p4ddy1
- [unbound](./plugins/inputs/unbound/README.md) - Thanks to @aromeyer
- [wavefront](./plugins/outputs/wavefront/README.md) - Thanks to @puckpuck
### Release Notes
- In the `kinesis` output, use of the `partition_key` and
`use_random_partitionkey` options has been deprecated in favor of the
`partition` subtable. This allows for more flexible methods to set the
partition key, such as by metric name or by tag (see the sketch after these notes).
- With the release of the new improved `jolokia2` input, the legacy `jolokia`
plugin is deprecated and will be removed in a future release. Users of this
plugin are encouraged to update to the new `jolokia2` plugin.
- In the `postgresql` and `postgresql_extensible` plugins, the type of the oid
data type has changed from string to integer. It is recommended to drop
affected fields until a new shard is started. For details on how to
workaround this issue please see [#3622](https://github.com/influxdata/telegraf/issues/3622).
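A sketch of the `partition` subtable mentioned in the `kinesis` note above;
the region, stream name, and tag key are placeholders:
```toml
[[outputs.kinesis]]
  region = "us-west-2"      # placeholder
  streamname = "telegraf"   # placeholder

  [outputs.kinesis.partition]
    method = "tag"          # e.g. "static", "random", "measurement", or "tag"
    key = "host"            # tag to read the partition key from
```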
### Features
- [#3170](https://github.com/influxdata/telegraf/pull/3170): Add support for sharding based on metric name.
- [#3196](https://github.com/influxdata/telegraf/pull/3196): Add Kafka output plugin topic_suffix option.
- [#3027](https://github.com/influxdata/telegraf/pull/3027): Include mount mode option in disk metrics.
- [#3191](https://github.com/influxdata/telegraf/pull/3191): TLS and MTLS enhancements to HTTPListener input plugin.
- [#3213](https://github.com/influxdata/telegraf/pull/3213): Add polling method to logparser and tail inputs.
- [#3211](https://github.com/influxdata/telegraf/pull/3211): Add timeout option for kubernetes input.
- [#3234](https://github.com/influxdata/telegraf/pull/3234): Add support for timing sums in statsd input.
- [#2617](https://github.com/influxdata/telegraf/issues/2617): Add resource limit monitoring to procstat.
- [#3236](https://github.com/influxdata/telegraf/pull/3236): Add support for k8s service DNS discovery to prometheus input.
- [#3245](https://github.com/influxdata/telegraf/pull/3245): Add configurable metrics endpoint to prometheus output.
- [#3214](https://github.com/influxdata/telegraf/pull/3214): Add new nginx_plus input plugin.
- [#3215](https://github.com/influxdata/telegraf/pull/3215): Add support for NSQLookupd to nsq_consumer.
- [#2278](https://github.com/influxdata/telegraf/pull/2278): Add redesigned Jolokia input plugin.
- [#3106](https://github.com/influxdata/telegraf/pull/3106): Add configurable separator for metrics and fields in opentsdb output.
- [#1692](https://github.com/influxdata/telegraf/pull/1692): Add support for the rollbar occurrence webhook event.
- [#3160](https://github.com/influxdata/telegraf/pull/3160): Add Wavefront output plugin.
- [#3281](https://github.com/influxdata/telegraf/pull/3281): Add extra wired tiger cache metrics to mongodb input.
- [#3141](https://github.com/influxdata/telegraf/pull/3141): Collect Docker Swarm service metrics in docker input plugin.
- [#2449](https://github.com/influxdata/telegraf/pull/2449): Add smart input plugin for collecting S.M.A.R.T. data.
- [#3269](https://github.com/influxdata/telegraf/pull/3269): Add cluster health level configuration to elasticsearch input.
- [#3304](https://github.com/influxdata/telegraf/pull/3304): Add ability to limit node stats in elasticsearch input.
- [#2167](https://github.com/influxdata/telegraf/pull/2167): Add new basicstats aggregator.
- [#3344](https://github.com/influxdata/telegraf/pull/3344): Add UDP IPv6 support to statsd input.
- [#3350](https://github.com/influxdata/telegraf/pull/3350): Use labels in prometheus output for string fields.
- [#3358](https://github.com/influxdata/telegraf/pull/3358): Add support for decimal timestamps to ts-epoch modifier.
- [#3337](https://github.com/influxdata/telegraf/pull/3337): Add histogram and summary types and use in prometheus plugins.
- [#3365](https://github.com/influxdata/telegraf/pull/3365): Gather concurrently from snmp agents.
- [#3333](https://github.com/influxdata/telegraf/issues/3333): Perform DNS lookup before ping and report result.
- [#3398](https://github.com/influxdata/telegraf/issues/3398): Add instance name option to varnish plugin.
- [#3406](https://github.com/influxdata/telegraf/pull/3406): Add support for SSL settings to ElasticSearch output plugin.
- [#3315](https://github.com/influxdata/telegraf/pull/3315): Add Teamspeak 3 input plugin.
- [#3305](https://github.com/influxdata/telegraf/pull/3305): Add modification_time field to filestat input plugin.
- [#2019](https://github.com/influxdata/telegraf/pull/2019): Add Solr input plugin.
- [#3210](https://github.com/influxdata/telegraf/pull/3210): Add CrateDB output plugin.
- [#3459](https://github.com/influxdata/telegraf/pull/3459): Add systemd unit pid and cgroup matching to procstat.
- [#3477](https://github.com/influxdata/telegraf/pull/3477): Add Particle Webhook Plugin.
- [#3471](https://github.com/influxdata/telegraf/pull/3471): Use MAX() instead of SUM() for latency measurements in sqlserver.
- [#3490](https://github.com/influxdata/telegraf/pull/3490): Add index by week number to Elasticsearch output.
- [#3434](https://github.com/influxdata/telegraf/pull/3434): Add unbound input plugin.
- [#3449](https://github.com/influxdata/telegraf/pull/3449): Add opensmtpd input plugin.
- [#3470](https://github.com/influxdata/telegraf/pull/3470): Add support for tags in the index name in elasticsearch output.
- [#2553](https://github.com/influxdata/telegraf/pull/2553): Add postfix input plugin.
- [#3424](https://github.com/influxdata/telegraf/pull/3424): Add bond input plugin.
- [#3518](https://github.com/influxdata/telegraf/pull/3518): Add slab to mem plugin.
- [#3519](https://github.com/influxdata/telegraf/pull/3519): Add input plugin for DC/OS.
- [#3140](https://github.com/influxdata/telegraf/pull/3140): Add support for glob patterns in net input plugin.
- [#3405](https://github.com/influxdata/telegraf/pull/3405): Add input plugin for OpenBSD/FreeBSD pf.
- [#3528](https://github.com/influxdata/telegraf/pull/3528): Add option to amqp output to publish persistent messages.
- [#3530](https://github.com/influxdata/telegraf/pull/3530): Support I (idle) process state on procfs+Linux.
### Bugfixes
- [#3136](https://github.com/influxdata/telegraf/issues/3136): Fix webhooks input address in use during reload.
- [#3258](https://github.com/influxdata/telegraf/issues/3258): Unlock Statsd when stopping to prevent deadlock.
- [#3319](https://github.com/influxdata/telegraf/issues/3319): Fix cloudwatch output requires unneeded permissions.
- [#3351](https://github.com/influxdata/telegraf/issues/3351): Fix prometheus passthrough for existing value types.
- [#3430](https://github.com/influxdata/telegraf/issues/3430): Always ignore autofs filesystems in disk input.
- [#3326](https://github.com/influxdata/telegraf/issues/3326): Fail metrics parsing on unescaped quotes.
- [#3473](https://github.com/influxdata/telegraf/pull/3473): Whitelist allowed char classes for graphite output.
- [#3488](https://github.com/influxdata/telegraf/pull/3488): Use hexadecimal ids and lowercase names in zipkin input.
- [#3263](https://github.com/influxdata/telegraf/issues/3263): Fix snmp-tools output parsing with Windows EOLs.
- [#3447](https://github.com/influxdata/telegraf/issues/3447): Add shadow-utils dependency to rpm package.
- [#3448](https://github.com/influxdata/telegraf/issues/3448): Use deb-systemd-invoke to restart service.
- [#3553](https://github.com/influxdata/telegraf/issues/3553): Fix kafka_consumer outside range of offsets error.
- [#3568](https://github.com/influxdata/telegraf/issues/3568): Fix separation of multiple prometheus_client outputs.
- [#3577](https://github.com/influxdata/telegraf/issues/3577): Don't add system input uptime_format as a counter.
## v1.4.5 [2017-12-01]
### Bugfixes
- [#3500](https://github.com/influxdata/telegraf/issues/3500): Fix global variable collection when using interval_slow option in mysql input.
- [#3486](https://github.com/influxdata/telegraf/issues/3486): Fix error getting net connections info in netstat input.
- [#3529](https://github.com/influxdata/telegraf/issues/3529): Fix HOST_MOUNT_PREFIX in docker with disk input.
## v1.4.4 [2017-11-08]
### Bugfixes
- [#3401](https://github.com/influxdata/telegraf/pull/3401): Use schema specified in mqtt_consumer input.
- [#3419](https://github.com/influxdata/telegraf/issues/3419): Redact datadog API key in log output.
- [#3311](https://github.com/influxdata/telegraf/issues/3311): Fix error getting pids in netstat input.
- [#3339](https://github.com/influxdata/telegraf/issues/3339): Support HOST_VAR envvar to locate /var in system input.
- [#3383](https://github.com/influxdata/telegraf/issues/3383): Use current time if docker container read time is zero value.
## v1.4.3 [2017-10-25]
### Bugfixes
- [#3327](https://github.com/influxdata/telegraf/issues/3327): Fix container name filters in docker input.
- [#3321](https://github.com/influxdata/telegraf/issues/3321): Fix snmpwalk address format in leofs input.
- [#3329](https://github.com/influxdata/telegraf/issues/3329): Fix case sensitivity issue in sqlserver query.
- [#3342](https://github.com/influxdata/telegraf/pull/3342): Fix CPU input plugin stuck after suspend on Linux.
- [#3013](https://github.com/influxdata/telegraf/issues/3013): Fix mongodb input panic when restarting mongodb.
- [#3224](https://github.com/influxdata/telegraf/pull/3224): Preserve url path prefix in influx output.
- [#3354](https://github.com/influxdata/telegraf/pull/3354): Fix TELEGRAF_OPTS expansion in systemd service unit.
- [#3357](https://github.com/influxdata/telegraf/issues/3357): Remove warning when JSON contains null value.
- [#3375](https://github.com/influxdata/telegraf/issues/3375): Fix ACL token usage in consul input plugin.
- [#3369](https://github.com/influxdata/telegraf/issues/3369): Fix unquoting error with Tomcat 6.
- [#3373](https://github.com/influxdata/telegraf/issues/3373): Fix syscall panic in diskio on some Linux systems.
## v1.4.2 [2017-10-10]
### Bugfixes
- [#3259](https://github.com/influxdata/telegraf/issues/3259): Fix error if int larger than 32-bit in /proc/vmstat.
- [#3265](https://github.com/influxdata/telegraf/issues/3265): Fix parsing of JSON with a UTF8 BOM in httpjson.
- [#2887](https://github.com/influxdata/telegraf/issues/2887): Allow JSON data format to contain zero metrics.
- [#3284](https://github.com/influxdata/telegraf/issues/3284): Fix format of connection_timeout in mqtt_consumer.
- [#3081](https://github.com/influxdata/telegraf/issues/3081): Fix case sensitivity error in sqlserver input.
- [#3297](https://github.com/influxdata/telegraf/issues/3297): Add support for proxy environment variables to http_response.
- [#1588](https://github.com/influxdata/telegraf/issues/1588): Add support for standard proxy env vars in outputs.
- [#3282](https://github.com/influxdata/telegraf/issues/3282): Fix panic in cpu input if number of cpus changes.
- [#2854](https://github.com/influxdata/telegraf/issues/2854): Use chunked transfer encoding in InfluxDB output.
## v1.4.1 [2017-09-26]
### Bugfixes
- [#3167](https://github.com/influxdata/telegraf/issues/3167): Fix MQTT input exits if Broker is not available on startup.
- [#3217](https://github.com/influxdata/telegraf/issues/3217): Fix optional field value conversions in fluentd input.
- [#3227](https://github.com/influxdata/telegraf/issues/3227): Whitelist allowed char classes for opentsdb output.
- [#3232](https://github.com/influxdata/telegraf/issues/3232): Fix counter and gauge metric types.
- [#3235](https://github.com/influxdata/telegraf/issues/3235): Fix skipped line with empty target in iptables.
- [#3175](https://github.com/influxdata/telegraf/issues/3175): Fix duplicate keys in perf counters sqlserver query.
- [#3230](https://github.com/influxdata/telegraf/issues/3230): Fix panic in statsd p100 calculation.
- [#3242](https://github.com/influxdata/telegraf/issues/3242): Fix arm64 packages contain 32-bit executable.
## v1.4 [2017-09-05]
### Release Notes
- The `kafka_consumer` input has been updated to support Kafka 0.9 and
above style consumer offset handling. The previous version of this plugin
supporting Kafka 0.8 and below is available as the `kafka_consumer_legacy`
plugin.
- In the `aerospike` input the `node_name` field has been changed to be a tag
for both the `aerospike_node` and `aerospike_namespace` measurements.
- The default prometheus_client port has been changed to 9273.
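For the port change, a minimal sketch of the corresponding output
configuration (only the `listen` option and the `9273` default come from the
note above):
```toml
[[outputs.prometheus_client]]
  listen = ":9273"   # new default port
```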
### New Plugins
- [fail2ban](./plugins/inputs/fail2ban/README.md) - Thanks to @grugrut
- [fluentd](./plugins/inputs/fluentd/README.md) - Thanks to @DanKans
- [histogram](./plugins/aggregators/histogram/README.md) - Thanks to @vlamug
- [minecraft](./plugins/inputs/minecraft/README.md) - Thanks to @adamperlin & @Ayrdrie
- [openldap](./plugins/inputs/openldap/README.md) - Thanks to @cobaugh
- [salesforce](./plugins/inputs/salesforce/README.md) - Thanks to @rody
- [tomcat](./plugins/inputs/tomcat/README.md) - Thanks to @mlindes
- [win_services](./plugins/inputs/win_services/README.md) - Thanks to @vlastahajek
- [zipkin](./plugins/inputs/zipkin/README.md) - Thanks to @adamperlin & @Ayrdrie
### Features
- [#2487](https://github.com/influxdata/telegraf/pull/2487): Add Kafka 0.9+ consumer support
- [#2773](https://github.com/influxdata/telegraf/pull/2773): Add support for self-signed certs to InfluxDB input plugin
- [#2293](https://github.com/influxdata/telegraf/pull/2293): Add TCP listener for statsd input
- [#2581](https://github.com/influxdata/telegraf/pull/2581): Add Docker container environment variables as tags. Only whitelisted
- [#2817](https://github.com/influxdata/telegraf/pull/2817): Add timeout option to IPMI sensor plugin
- [#2883](https://github.com/influxdata/telegraf/pull/2883): Add support for an optional SSL/TLS configuration to nginx input plugin
- [#2882](https://github.com/influxdata/telegraf/pull/2882): Add timezone support for logparser timestamps.
- [#2814](https://github.com/influxdata/telegraf/pull/2814): Add result_type field for http_response input.
- [#2734](https://github.com/influxdata/telegraf/pull/2734): Add include/exclude filters for docker containers.
- [#2602](https://github.com/influxdata/telegraf/pull/2602): Add secure connection support to graphite output.
- [#2908](https://github.com/influxdata/telegraf/pull/2908): Add min/max response time on linux/darwin to ping.
- [#2929](https://github.com/influxdata/telegraf/pull/2929): Add HTTP Proxy support to influxdb output.
- [#2933](https://github.com/influxdata/telegraf/pull/2933): Add standard SSL options to mysql input.
- [#2875](https://github.com/influxdata/telegraf/pull/2875): Add input plugin for fail2ban.
- [#2924](https://github.com/influxdata/telegraf/pull/2924): Support HOST_PROC in processes and linux_sysctl_fs inputs.
- [#2960](https://github.com/influxdata/telegraf/pull/2960): Add Minecraft input plugin.
- [#2963](https://github.com/influxdata/telegraf/pull/2963): Add support for RethinkDB 1.0 handshake protocol.
- [#2943](https://github.com/influxdata/telegraf/pull/2943): Add optional usage_active and time_active CPU metrics.
- [#2973](https://github.com/influxdata/telegraf/pull/2973): Change default prometheus_client port.
- [#2661](https://github.com/influxdata/telegraf/pull/2661): Add fluentd input plugin.
- [#2990](https://github.com/influxdata/telegraf/pull/2990): Add result_type field to net_response input plugin.
- [#2571](https://github.com/influxdata/telegraf/pull/2571): Add read timeout to socket_listener
- [#2612](https://github.com/influxdata/telegraf/pull/2612): Add input plugin for OpenLDAP.
- [#3042](https://github.com/influxdata/telegraf/pull/3042): Add network option to dns_query.
- [#3054](https://github.com/influxdata/telegraf/pull/3054): Add redis_version field to redis input.
- [#3063](https://github.com/influxdata/telegraf/pull/3063): Add tls options to docker input.
- [#2387](https://github.com/influxdata/telegraf/pull/2387): Add histogram aggregator plugin.
- [#3080](https://github.com/influxdata/telegraf/pull/3080): Add zipkin input plugin.
- [#3023](https://github.com/influxdata/telegraf/pull/3023): Add Windows Services input plugin.
- [#3098](https://github.com/influxdata/telegraf/pull/3098): Add path tag to logparser containing path of logfile.
- [#3075](https://github.com/influxdata/telegraf/pull/3075): Add salesforce input plugin.
- [#3097](https://github.com/influxdata/telegraf/pull/3097): Add option to run varnish under sudo.
- [#3119](https://github.com/influxdata/telegraf/pull/3119): Add weighted_io_time to diskio input.
- [#2978](https://github.com/influxdata/telegraf/pull/2978): Add gzip content-encoding support to influxdb output.
- [#3127](https://github.com/influxdata/telegraf/pull/3127): Allow using system plugin in Windows.
- [#3112](https://github.com/influxdata/telegraf/pull/3112): Add tomcat input plugin.
- [#3182](https://github.com/influxdata/telegraf/pull/3182): HTTP headers can be added to InfluxDB output.
### Bugfixes
- [#2607](https://github.com/influxdata/telegraf/issues/2607): Improve logging of errors in Cassandra input.
- [#2819](https://github.com/influxdata/telegraf/pull/2819): Set db_version to 0 if the version query fails.
- [#2749](https://github.com/influxdata/telegraf/pull/2749): Fixed sqlserver input to work with case sensitive server collation.
- [#2716](https://github.com/influxdata/telegraf/pull/2716): Systemd does not see all shutdowns as failures
- [#2782](https://github.com/influxdata/telegraf/pull/2782): Reuse transports in input plugins
- [#2815](https://github.com/influxdata/telegraf/issues/2815): Fix processes input failing with "no such process".
- [#1137](https://github.com/influxdata/telegraf/issues/1137): Fix multiple plugin loading in win_perf_counters.
- [#2855](https://github.com/influxdata/telegraf/pull/2855): MySQL input: log and continue on field parse error.
- [#2885](https://github.com/influxdata/telegraf/pull/2885): Fix timeout option in Windows ping input sample configuration.
- [#2911](https://github.com/influxdata/telegraf/issues/2911): Fix Kinesis output plugin in govcloud.
- [#2917](https://github.com/influxdata/telegraf/issues/2917): Fix Aerospike input adds all nodes to a single series.
- [#2452](https://github.com/influxdata/telegraf/pull/2452): Improve Prometheus Client output documentation.
- [#2984](https://github.com/influxdata/telegraf/pull/2984): Display error message if prometheus output fails to listen.
- [#2997](https://github.com/influxdata/telegraf/issues/2997): Fix elasticsearch output content type detection warning.
- [#2914](https://github.com/influxdata/telegraf/issues/2914): Prevent possible deadlock when using aggregators.
- [#2860](https://github.com/influxdata/telegraf/issues/2860): Fix combined tagdrop/tagpass filtering.
- [#3036](https://github.com/influxdata/telegraf/pull/3036): Fix filtering when both pass and drop match an item.
- [#2964](https://github.com/influxdata/telegraf/issues/2964): Only report cpu usage for online cpus in docker input.
- [#3050](https://github.com/influxdata/telegraf/pull/3050): Start first aggregator period at startup time.
- [#2906](https://github.com/influxdata/telegraf/issues/2906): Fix panic in logparser if file cannot be opened.
- [#2886](https://github.com/influxdata/telegraf/issues/2886): Default to localhost if zookeeper has no servers set.
- [#2457](https://github.com/influxdata/telegraf/issues/2457): Fix docker memory and cpu reporting in Windows.
- [#3058](https://github.com/influxdata/telegraf/issues/3058): Allow iptable entries with trailing text.
- [#1680](https://github.com/influxdata/telegraf/issues/1680): Sanitize password from couchbase metric.
- [#3104](https://github.com/influxdata/telegraf/issues/3104): Converge to typed value in prometheus output.
- [#2899](https://github.com/influxdata/telegraf/issues/2899): Skip compilation of logparser and tail on Solaris.
- [#2951](https://github.com/influxdata/telegraf/issues/2951): Discard logging from tail library.
- [#3126](https://github.com/influxdata/telegraf/pull/3126): Remove log message on ping timeout.
- [#3144](https://github.com/influxdata/telegraf/issues/3144): Don't retry points beyond retention policy.
- [#3015](https://github.com/influxdata/telegraf/issues/3015): Don't start Telegraf on install in Amazon Linux.
- [#3053](https://github.com/influxdata/telegraf/issues/3053): Enable hddtemp input on all platforms.
- [#3142](https://github.com/influxdata/telegraf/issues/3142): Escape backslash within string fields.
- [#3162](https://github.com/influxdata/telegraf/issues/3162): Fix parsing of SHM remotes in ntpq input
- [#3149](https://github.com/influxdata/telegraf/issues/3149): Don't fail parsing zpool stats if pool health is UNAVAIL on FreeBSD.
- [#2672](https://github.com/influxdata/telegraf/issues/2672): Fix NSQ input plugin when used with version 1.0.0-compat.
- [#2523](https://github.com/influxdata/telegraf/issues/2523): Added CloudWatch metric constraint validation.
- [#3179](https://github.com/influxdata/telegraf/issues/3179): Skip non-numerical values in graphite format.
- [#3187](https://github.com/influxdata/telegraf/issues/3187): Fix panic when handling string fields with escapes.
## v1.3.5 [2017-07-26]
### Bugfixes
- [#3049](https://github.com/influxdata/telegraf/issues/3049): Fix prometheus output cannot be reloaded.
- [#3037](https://github.com/influxdata/telegraf/issues/3037): Fix filestat reporting exists when cannot list directory.
- [#2386](https://github.com/influxdata/telegraf/issues/2386): Fix ntpq parse issue when using dns_lookup.
- [#2554](https://github.com/influxdata/telegraf/issues/2554): Fix panic when agent.interval = "0s".
## v1.3.4 [2017-07-12]
### Bugfixes
- [#3001](https://github.com/influxdata/telegraf/issues/3001): Fix handling of escape characters within fields.
- [#2988](https://github.com/influxdata/telegraf/issues/2988): Fix chrony plugin does not track system time offset.
- [#3004](https://github.com/influxdata/telegraf/issues/3004): Do not allow metrics with trailing slashes.
- [#3011](https://github.com/influxdata/telegraf/issues/3011): Prevent Write from being called concurrently.
## v1.3.3 [2017-06-28]
### Bugfixes
- [#2915](https://github.com/influxdata/telegraf/issues/2915): Allow dos line endings in tail and logparser.
- [#2937](https://github.com/influxdata/telegraf/issues/2937): Remove label value sanitization in prometheus output.
- [#2948](https://github.com/influxdata/telegraf/issues/2948): Fix bug parsing default timestamps with modified precision.
- [#2954](https://github.com/influxdata/telegraf/issues/2954): Fix panic in elasticsearch input if cannot determine master.
## v1.3.2 [2017-06-14]
### Bugfixes
- [#2862](https://github.com/influxdata/telegraf/issues/2862): Fix InfluxDB UDP metric splitting.
- [#2888](https://github.com/influxdata/telegraf/issues/2888): Fix mongodb/leofs urls without scheme.
- [#2822](https://github.com/influxdata/telegraf/issues/2822): Fix inconsistent label dimensions in prometheus output.
## v1.3.1 [2017-05-31]
### Bugfixes
- [#2749](https://github.com/influxdata/telegraf/pull/2749): Fixed sqlserver input to work with case sensitive server collation.
- [#2782](https://github.com/influxdata/telegraf/pull/2782): Reuse transports in input plugins
- [#2815](https://github.com/influxdata/telegraf/issues/2815): Fix processes input failing with "no such process".
- [#2851](https://github.com/influxdata/telegraf/pull/2851): Fix InfluxDB output database quoting.
- [#2856](https://github.com/influxdata/telegraf/issues/2856): Fix net input on older Linux kernels.
- [#2848](https://github.com/influxdata/telegraf/pull/2848): Fix panic in mongo input.
- [#2869](https://github.com/influxdata/telegraf/pull/2869): Fix length calculation of split metric buffer.
## v1.3 [2017-05-15]
### Release Notes
- Users of the Windows `ping` plugin will need to drop or migrate their
measurements in order to continue using the plugin. The reason for this is that
the Windows plugin was outputting a different type than the Linux plugin, which
made it impossible to use the `ping` plugin for both Windows and Linux
machines.
- Ceph: the `ceph_pgmap_state` metric content has been modified to use a unique field `count`, with each state expressed as a `state` tag.
Telegraf < 1.3:
```
# field_name value
active+clean 123
active+clean+scrubbing 3
```
Telegraf >= 1.3:
```
# field_name value tag
count 123 state=active+clean
count 3 state=active+clean+scrubbing
```
- The [Riemann output plugin](./plugins/outputs/riemann) has been rewritten
and the previous riemann plugin is _incompatible_ with the new one. The reasons
for this are outlined in issue [#1878](https://github.com/influxdata/telegraf/issues/1878).
The previous riemann output will still be available using
`outputs.riemann_legacy` if needed, but that will eventually be deprecated.
It is highly recommended that all users migrate to the new riemann output plugin.
- Generic [socket_listener](./plugins/inputs/socket_listener) and
[socket_writer](./plugins/outputs/socket_writer) plugins have been implemented
for receiving and sending UDP, TCP, unix, & unix-datagram data. These plugins
will replace udp_listener and tcp_listener, which are still available but will
be deprecated eventually.
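A minimal sketch of the new generic socket plugins, assuming a local TCP
endpoint (the port is a placeholder):
```toml
[[inputs.socket_listener]]
  service_address = "tcp://:8094"    # udp://, unix://, and unixgram:// also work

[[outputs.socket_writer]]
  address = "tcp://127.0.0.1:8094"   # placeholder destination
```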
### Features
- [#2721](https://github.com/influxdata/telegraf/pull/2721): Added SASL options for kafka output plugin.
- [#2723](https://github.com/influxdata/telegraf/pull/2723): Added SSL configuration for input haproxy.
- [#2494](https://github.com/influxdata/telegraf/pull/2494): Add interrupts input plugin.
- [#2094](https://github.com/influxdata/telegraf/pull/2094): Add generic socket listener & writer.
- [#2204](https://github.com/influxdata/telegraf/pull/2204): Extend http_response to support searching for a substring in response. Return 1 if found, else 0.
- [#2137](https://github.com/influxdata/telegraf/pull/2137): Added userstats to mysql input plugin.
- [#2179](https://github.com/influxdata/telegraf/pull/2179): Added more InnoDB metrics to MySQL plugin.
- [#2229](https://github.com/influxdata/telegraf/pull/2229): `ceph_pgmap_state` metric now uses a single field `count`, with PG state published as `state` tag.
- [#2251](https://github.com/influxdata/telegraf/pull/2251): InfluxDB output: use own client for improved throughput and fewer allocations.
- [#2330](https://github.com/influxdata/telegraf/pull/2330): Keep -config-directory when running as Windows service.
- [#1900](https://github.com/influxdata/telegraf/pull/1900): Riemann plugin rewrite.
- [#1453](https://github.com/influxdata/telegraf/pull/1453): diskio: add support for name templates and udev tags.
- [#2277](https://github.com/influxdata/telegraf/pull/2277): add integer metrics for Consul check health state.
- [#2201](https://github.com/influxdata/telegraf/pull/2201): Add lock option to the IPtables input plugin.
- [#2244](https://github.com/influxdata/telegraf/pull/2244): Support ipmi_sensor plugin querying local ipmi sensors.
- [#2339](https://github.com/influxdata/telegraf/pull/2339): Increment gather_errors for all errors emitted by inputs.
- [#2071](https://github.com/influxdata/telegraf/issues/2071): Use official docker SDK.
- [#1678](https://github.com/influxdata/telegraf/pull/1678): Add AMQP consumer input plugin
- [#2512](https://github.com/influxdata/telegraf/pull/2512): Added pprof tool.
- [#2501](https://github.com/influxdata/telegraf/pull/2501): Support DEAD(X) state in system input plugin.
- [#2522](https://github.com/influxdata/telegraf/pull/2522): Add support for mongodb client certificates.
- [#1948](https://github.com/influxdata/telegraf/pull/1948): Support adding SNMP table indexes as tags.
- [#2332](https://github.com/influxdata/telegraf/pull/2332): Add Elasticsearch 5.x output
- [#2587](https://github.com/influxdata/telegraf/pull/2587): Add json timestamp units configurability
- [#2597](https://github.com/influxdata/telegraf/issues/2597): Add support for Linux sysctl-fs metrics.
- [#2425](https://github.com/influxdata/telegraf/pull/2425): Support to include/exclude docker container labels as tags
- [#1667](https://github.com/influxdata/telegraf/pull/1667): dmcache input plugin
- [#2637](https://github.com/influxdata/telegraf/issues/2637): Add support for precision in http_listener
- [#2636](https://github.com/influxdata/telegraf/pull/2636): Add `message_len_max` option to `kafka_consumer` input
- [#1100](https://github.com/influxdata/telegraf/issues/1100): Add collectd parser
- [#1820](https://github.com/influxdata/telegraf/issues/1820): easier plugin testing without outputs
- [#2493](https://github.com/influxdata/telegraf/pull/2493): Check signature in the GitHub webhook plugin
- [#2038](https://github.com/influxdata/telegraf/issues/2038): Add papertrail support to webhooks
- [#2253](https://github.com/influxdata/telegraf/pull/2253): Change jolokia plugin to use bulk requests.
- [#2575](https://github.com/influxdata/telegraf/issues/2575): Add diskio input for Darwin.
- [#2705](https://github.com/influxdata/telegraf/pull/2705): Kinesis output: add use_random_partitionkey option
- [#2635](https://github.com/influxdata/telegraf/issues/2635): add tcp keep-alive to socket_listener & socket_writer
- [#2031](https://github.com/influxdata/telegraf/pull/2031): Add Kapacitor input plugin
- [#2732](https://github.com/influxdata/telegraf/pull/2732): Use go 1.8.1
- [#2712](https://github.com/influxdata/telegraf/issues/2712): Documentation for rabbitmq input plugin
- [#2141](https://github.com/influxdata/telegraf/pull/2141): Logparser handles newly-created files.
### Bugfixes
- [#2633](https://github.com/influxdata/telegraf/pull/2633): ipmi_sensor: allow @ symbol in password
- [#2077](https://github.com/influxdata/telegraf/issues/2077): SQL Server Input - Arithmetic overflow error converting numeric to data type int.
- [#2262](https://github.com/influxdata/telegraf/issues/2262): Flush jitter can inhibit metric collection.
- [#2318](https://github.com/influxdata/telegraf/issues/2318): haproxy input - Add missing fields.
- [#2287](https://github.com/influxdata/telegraf/issues/2287): Kubernetes input: Handle null startTime for stopped pods.
- [#2356](https://github.com/influxdata/telegraf/issues/2356): cpu input panic when /proc/stat is empty.
- [#2341](https://github.com/influxdata/telegraf/issues/2341): telegraf swallowing panics in --test mode.
- [#2358](https://github.com/influxdata/telegraf/pull/2358): Create pidfile with 644 permissions & defer file deletion.
- [#2360](https://github.com/influxdata/telegraf/pull/2360): Fixed install/remove of telegraf on non-systemd Debian/Ubuntu systems
- [#2282](https://github.com/influxdata/telegraf/issues/2282): Reloading telegraf freezes prometheus output.
- [#2390](https://github.com/influxdata/telegraf/issues/2390): Empty tag value causes error on InfluxDB output.
- [#2380](https://github.com/influxdata/telegraf/issues/2380): buffer_size field value is negative number from "internal" plugin.
- [#2414](https://github.com/influxdata/telegraf/issues/2414): Missing error handling in the MySQL plugin leads to segmentation violation.
- [#2462](https://github.com/influxdata/telegraf/pull/2462): Fix type conflict in windows ping plugin.
- [#2178](https://github.com/influxdata/telegraf/issues/2178): logparser: regexp with lookahead.
- [#2466](https://github.com/influxdata/telegraf/issues/2466): Telegraf can crash in LoadDirectory on 0600 files.
- [#2215](https://github.com/influxdata/telegraf/issues/2215): Iptables input: document better that rules without a comment are ignored.
- [#2483](https://github.com/influxdata/telegraf/pull/2483): Fix win_perf_counters capping values at 100.
- [#2498](https://github.com/influxdata/telegraf/pull/2498): Exporting Ipmi.Path to be set by config.
- [#2500](https://github.com/influxdata/telegraf/pull/2500): Remove warning when parsing empty content.
- [#2520](https://github.com/influxdata/telegraf/pull/2520): Update default value for Cloudwatch rate limit
- [#2513](https://github.com/influxdata/telegraf/issues/2513): create /etc/telegraf/telegraf.d directory in tarball.
- [#2541](https://github.com/influxdata/telegraf/issues/2541): Return error on unsupported serializer data format.
- [#1827](https://github.com/influxdata/telegraf/issues/1827): Fix Windows Performance Counters multi instance identifier
- [#2576](https://github.com/influxdata/telegraf/pull/2576): Add write timeout to Riemann output
- [#2596](https://github.com/influxdata/telegraf/pull/2596): fix timestamp parsing on prometheus plugin
- [#2610](https://github.com/influxdata/telegraf/pull/2610): Fix deadlock when output cannot write
- [#2410](https://github.com/influxdata/telegraf/issues/2410): Fix connection leak in postgresql.
- [#2628](https://github.com/influxdata/telegraf/issues/2628): Set default measurement name for snmp input.
- [#2649](https://github.com/influxdata/telegraf/pull/2649): Improve performance of diskio with many disks
- [#2671](https://github.com/influxdata/telegraf/issues/2671): The internal input plugin uses the wrong units for `heap_objects`
- [#2684](https://github.com/influxdata/telegraf/pull/2684): Fix ipmi_sensor config is shared between all plugin instances
- [#2450](https://github.com/influxdata/telegraf/issues/2450): Network statistics not collected when system has alias interfaces
- [#1911](https://github.com/influxdata/telegraf/issues/1911): Sysstat plugin needs LANG=C or similar locale
- [#2528](https://github.com/influxdata/telegraf/issues/2528): File output closes standard streams on reload.
- [#2603](https://github.com/influxdata/telegraf/issues/2603): AMQP output disconnect blocks all outputs
- [#2706](https://github.com/influxdata/telegraf/issues/2706): Improve documentation for redis input plugin
## v1.2.1 [2017-02-01]
### Bugfixes
- [#2317](https://github.com/influxdata/telegraf/issues/2317): Fix segfault on nil metrics with influxdb output.
- [#2324](https://github.com/influxdata/telegraf/issues/2324): Fix negative number handling.
### Features
- [#2348](https://github.com/influxdata/telegraf/pull/2348): Go version 1.7.4 -> 1.7.5
## v1.2 [2017-01-00]
### Release Notes
@@ -54,7 +711,7 @@ plugins, not just statsd.
- [#1775](https://github.com/influxdata/telegraf/issues/1775): Cache & expire metrics for delivery to prometheus.
- [#2146](https://github.com/influxdata/telegraf/issues/2146): Fix potential panic in aggregator plugin metric maker.
- [#1843](https://github.com/influxdata/telegraf/pull/1843) & [#1668](https://github.com/influxdata/telegraf/issues/1668): Add optional ability to define PID as a tag.
- [#1730](https://github.com/influxdata/telegraf/issues/1730): Fix win_perf_counters not gathering non-English counters.
- [#1730](https://github.com/influxdata/telegraf/issues/1730) & [#2261](https://github.com/influxdata/telegraf/pull/2261): Fix win_perf_counters not gathering non-English counters.
- [#2061](https://github.com/influxdata/telegraf/issues/2061): Fix panic when file stat info cannot be collected due to permissions or other issue(s).
- [#2045](https://github.com/influxdata/telegraf/issues/2045): Graylog output should set short_message field.
- [#1904](https://github.com/influxdata/telegraf/issues/1904): Hddtemp always put the value in the field temperature.
@@ -67,7 +724,10 @@ plugins, not just statsd.
- [#1973](https://github.com/influxdata/telegraf/issues/1973): Partial fix: logparser CLF pattern with IPv6 addresses.
- [#1975](https://github.com/influxdata/telegraf/issues/1975) & [#2102](https://github.com/influxdata/telegraf/issues/2102): Fix thread-safety when using multiple instances of the statsd input plugin.
- [#2027](https://github.com/influxdata/telegraf/issues/2027): docker input: interface conversion panic fix.
- [#1814](https://github.com/influxdata/telegraf/issues/1814): snmp: ensure proper context is present on error messages
- [#1814](https://github.com/influxdata/telegraf/issues/1814): snmp: ensure proper context is present on error messages.
- [#2299](https://github.com/influxdata/telegraf/issues/2299): opentsdb: add tcp:// prefix if no scheme provided.
- [#2297](https://github.com/influxdata/telegraf/issues/2297): influx parser: parse line-protocol without newlines.
- [#2245](https://github.com/influxdata/telegraf/issues/2245): influxdb output: fix field type conflict blocking output buffer.
## v1.1.2 [2016-12-12]
@@ -218,8 +878,11 @@ which can be installed via
evaluated at every flush interval, rather than once at startup. This makes it
consistent with the behavior of `collection_jitter` (see the sketch after these notes).
- postgresql plugins now handle oid and name typed columns seamlessly, previously they were ignored/skipped.
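A hedged sketch of the jitter options referenced above; the durations are
placeholders:
```toml
[agent]
  flush_interval = "10s"     # placeholder
  flush_jitter = "5s"        # now evaluated at every flush interval
  collection_jitter = "2s"   # placeholder
```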
### Features
- [#1617](https://github.com/influxdata/telegraf/pull/1617): postgresql_extensible now handles name and oid types correctly.
- [#1413](https://github.com/influxdata/telegraf/issues/1413): Separate container_version from container_image tag.
- [#1525](https://github.com/influxdata/telegraf/pull/1525): Support setting per-device and total metrics for Docker network and blockio.
- [#1466](https://github.com/influxdata/telegraf/pull/1466): MongoDB input plugin: adding per DB stats from db.stats()

CONTRIBUTING.md

@@ -12,7 +12,7 @@ but any information you can provide on how the data will look is appreciated.
See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb)
for a good example.
1. **Optional:** Help users of your plugin by including example queries for populating dashboards. Include these sample queries in the `README.md` for the plugin.
1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf). Or mention @jackzampolin in a PR comment with some common queries that you would want to alert on and he will write one for you.
1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf).
## GoDoc
@@ -52,7 +52,7 @@ See below for a quick example.
* Input Plugins must be added to the
`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
plugin can be configured. This is included in `telegraf -sample-config`.
plugin can be configured. This is included in `telegraf config`.
* The `Description` function should say in one line what this plugin does.
Let's say you've written a plugin that emits metrics about processes on the
@@ -79,7 +79,10 @@ func (s *Simple) Description() string {
}
func (s *Simple) SampleConfig() string {
return "ok = true # indicate if everything is fine"
return `
## Indicate if everything is fine
ok = true
`
}
func (s *Simple) Gather(acc telegraf.Accumulator) error {
@@ -124,7 +127,7 @@ You should also add the following to your SampleConfig() return:
```toml
## Data format to consume.
## Each data format has it's own unique set of configuration options, read
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
@@ -167,7 +170,7 @@ and `Stop()` methods.
### Service Plugin Guidelines
* Same as the `Plugin` guidelines, except that they must conform to the
`inputs.ServiceInput` interface.
[`telegraf.ServiceInput`](https://godoc.org/github.com/influxdata/telegraf#ServiceInput) interface.
## Output Plugins
@@ -183,7 +186,7 @@ See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
output can be configured. This is included in `telegraf -sample-config`.
output can be configured. This is included in `telegraf config`.
* The `Description` function should say in one line what this output does.
### Output Example
@@ -207,7 +210,9 @@ func (s *Simple) Description() string {
}
func (s *Simple) SampleConfig() string {
return "url = localhost"
return `
ok = true
`
}
func (s *Simple) Connect() error {
@@ -254,7 +259,7 @@ You should also add the following to your SampleConfig() return:
```toml
## Data format to output.
## Each data format has it's own unique set of configuration options, read
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
@@ -287,7 +292,7 @@ See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/processors/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
processor can be configured. This is include in `telegraf -sample-config`.
processor can be configured. This is included in the output of `telegraf config`.
* The `Description` function should say in one line what this processor does.
### Processor Example
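The example itself is collapsed in this view; a minimal sketch of the processor surface, assuming an `Apply(in ...telegraf.Metric) []telegraf.Metric` method in the style of the builtin printer processor (imports elided):

```go
type Printer struct{}

func (p *Printer) SampleConfig() string { return "" }

func (p *Printer) Description() string {
	return "print all metrics that pass through this processor"
}

// Apply may inspect, mutate, drop, or add metrics; here it just prints them.
func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric {
	for _, m := range in {
		fmt.Println(m.String())
	}
	return in
}

func init() {
	processors.Add("printer", func() telegraf.Processor { return &Printer{} })
}
```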
@@ -344,7 +349,7 @@ See below for a quick example.
* To be available within Telegraf itself, plugins must add themselves to the
`github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file.
* The `SampleConfig` function should return valid toml that describes how the
aggregator can be configured. This is include in `telegraf -sample-config`.
aggregator can be configured. This is included in `telegraf config`.
* The `Description` function should say in one line what this aggregator does.
* The Aggregator plugin will need to keep caches of metrics that have passed
through it. This should be done using the builtin `HashID()` function of each
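In practice the cache is keyed by `HashID()`, which groups points belonging to the same series (same name and tag set). A sketch modeled on the minmax aggregator; struct and field names are illustrative:

```go
type MinMax struct {
	cache map[uint64]*aggregate
}

type aggregate struct {
	name string
	tags map[string]string
	min  float64
	max  float64
}

func (m *MinMax) Add(in telegraf.Metric) {
	// Metrics with the same measurement name and tag set hash to the same series.
	id := in.HashID()
	if _, ok := m.cache[id]; !ok {
		m.cache[id] = &aggregate{name: in.Name(), tags: in.Tags()}
	}
	// ...update min/max for each numeric field of the cached series...
}
```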
@@ -457,29 +462,28 @@ func init() {
## Unit Tests
Before opening a pull request you should run the linter checks and
the short tests.
### Execute linter
execute `make lint`
### Execute short tests
execute `make test-short`
execute `make test`
### Execute long tests
### Execute integration tests
As Telegraf collects metrics from several third-party services it becomes a
difficult task to mock each service as some of them have complicated protocols
which would take some time to replicate.
Running the integration tests requires several docker containers to be
running. You can start the containers with:
```
make docker-run
```
To overcome this situation we've decided to use docker containers to provide a
fast and reproducible environment to test those services which require it.
For other situations
(i.e: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/redis/redis_test.go)
a simple mock will suffice.
And run the full test suite with:
```
make test-all
```
To execute Telegraf tests follow these simple steps:
- Install docker following [these](https://docs.docker.com/installation/)
instructions
- execute `make test`
### Unit test troubleshooting
Try cleaning up your test environment by executing `make docker-kill` and
re-running
Use `make docker-kill` to stop the containers.
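For plugins that don't need a live service, a plain Go test against the accumulator is usually enough. A sketch, assuming the `testutil.Accumulator` helper used throughout the existing plugin tests and the `Simple` plugin sketched earlier (imports elided):

```go
func TestGather(t *testing.T) {
	s := &Simple{Ok: true}

	var acc testutil.Accumulator
	require.NoError(t, s.Gather(&acc))

	acc.AssertContainsFields(t, "state",
		map[string]interface{}{"value": "pretty good"})
}
```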

Godeps

@@ -1,65 +1,100 @@
github.com/Shopify/sarama 8aadb476e66ca998f2f6bb3c993e9a2daa3666b9
github.com/Sirupsen/logrus 219c8cb75c258c552e999735be6df753ffc7afdc
github.com/aerospike/aerospike-client-go 7f3a312c3b2a60ac083ec6da296091c52c795c63
github.com/amir/raidman 53c1b967405155bfc8758557863bf2e14f814687
github.com/aws/aws-sdk-go 13a12060f716145019378a10e2806c174356b857
github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
github.com/cenkalti/backoff 4dc77674aceaabba2c7e3da25d4c823edfb73f99
github.com/couchbase/go-couchbase cb664315a324d87d19c879d9cc67fda6be8c2ac1
github.com/couchbase/gomemcached a5ea6356f648fec6ab89add00edd09151455b4b2
collectd.org 2ce144541b8903101fb8f1483cc0497a68798122
github.com/Azure/go-autorest 9ad9326b278af8fa5cc67c30c0ce9a58cc0862b2
github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
github.com/aerospike/aerospike-client-go 95e1ad7791bdbca44707fedbb29be42024900d9c
github.com/amir/raidman c74861fe6a7bb8ede0a010ce4485bdbb4fc4c985
github.com/apache/thrift 4aaa92ece8503a6da9bc6701604f69acf2b99d07
github.com/aws/aws-sdk-go c861d27d0304a79f727e9a8a4e2ac1e74602fdc0
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/bsm/sarama-cluster abf039439f66c1ce78017f560b490612552f6472
github.com/cenkalti/backoff b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3
github.com/couchbase/go-couchbase bfe555a140d53dc1adf390f1a1d4b0fd4ceadb28
github.com/couchbase/gomemcached 4a25d2f4e1dea9ea7dd76dfd943407abf9b07d29
github.com/couchbase/goutils 5823a0cbaaa9008406021dc5daf80125ea30bba6
github.com/dancannon/gorethink e7cac92ea2bc52638791a021f212145acfedb1fc
github.com/davecgh/go-spew 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
github.com/docker/engine-api 8924d6900370b4c7e7984be5adc61f50a80d7537
github.com/docker/go-connections f549a9393d05688dff0992ef3efd8bbe6c628aeb
github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/dgrijalva/jwt-go dbeaa9332f19a944acb5736b4456cfcc02140e29
github.com/docker/docker f5ec1e2936dcbe7b5001c2b817188b095c700c27
github.com/docker/go-connections 990a1a1a70b0da4c4cb70e117971a4f0babfbf1a
github.com/eapache/go-resiliency b86b1ec0dd4209a588dc1285cdd471e73525c0b3
github.com/eapache/queue ded5959c0d4e360646dc9e9908cff48666781367
github.com/eclipse/paho.mqtt.golang 0f7a459f04f13a41b7ed752d47944528d4bf9a86
github.com/go-sql-driver/mysql 1fca743146605a172a266e1654e01e5cd5669bee
github.com/gobwas/glob 49571a1557cd20e6a2410adc6421f85b66c730b5
github.com/golang/protobuf 552c7b9542c194800fd493123b3798ef0a832032
github.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380
github.com/gorilla/context 1ea25387ff6f684839d82767c1733ff4d4d15d0a
github.com/gorilla/mux c9e326e2bdec29039a3761c07bece13133863e1e
github.com/eapache/go-xerial-snappy bb955e01b9346ac19dc29eb16586c90ded99a98c
github.com/eapache/queue 44cc805cf13205b55f69e14bcb69867d1ae92f98
github.com/eclipse/paho.mqtt.golang aff15770515e3c57fc6109da73d42b0d46f7f483
github.com/go-logfmt/logfmt 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
github.com/go-redis/redis 73b70592cdaa9e6abdfcfbf97b4a90d80728c836
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
github.com/gobwas/glob bea32b9cd2d6f55753d94a28e959b13f0244797a
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
github.com/gogo/protobuf 7b6c6391c4ff245962047fc1e2c6e08b1cdfa0e8
github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/google/go-cmp f94e52cad91c65a63acc1e75d4be223ea22e99bc
github.com/gorilla/mux 53c1911da2b537f792e7cafcb446b05ffe33b996
github.com/go-redis/redis 73b70592cdaa9e6abdfcfbf97b4a90d80728c836
github.com/go-sql-driver/mysql 2e00b5cd70399450106cec6431c2e2ce3cae5034
github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478
github.com/hashicorp/consul 5aa90455ce78d4d41578bafc86305e6e6b28d7d2
github.com/hpcloud/tail b2940955ab8b26e19d43a43c4da0475dd81bdb56
github.com/influxdata/config b79f6829346b8d6e78ba73544b1e1038f1f1c9da
github.com/influxdata/influxdb fc57c0f7c635df3873f3d64f0ed2100ddc94d5ae
github.com/influxdata/toml af4df43894b16e3fd2b788d01bd27ad0776ef2d0
github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f
github.com/influxdata/tail c43482518d410361b6c383d7aebce33d0471d7bc
github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f
github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec
github.com/kardianos/osext 29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc
github.com/kardianos/service 5e335590050d6d00f3aa270217d288dda1c94d0a
github.com/fsnotify/fsnotify c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9
github.com/jackc/pgx 63f58fd32edb5684b9e9f4cfaac847c6b42b3917
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413
github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893
github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142
github.com/klauspost/crc32 19b0b332c9e4516a6370a0456e6182c3b5036720
github.com/lib/pq e182dc4027e2ded4b19396d638610f2653295f36
github.com/matttproud/golang_protobuf_extensions d0c3fe89de86839aecf2e0579c40ba3bb336a453
github.com/miekg/dns cce6c130cdb92c752850880fd285bea1d64439dd
github.com/mreiferson/go-snappystream 028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/miekg/dns 99f84ae56e75126dd77e5de4fae2ea034a468ca1
github.com/mitchellh/mapstructure d0303fe809921458f417bcf828397a65db30a7e4
github.com/multiplay/go-ts3 07477f49b8dfa3ada231afc7b7b17617d42afe8e
github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b
github.com/nats-io/nats ea8b4fd12ebb823073c0004b9f09ac8748f4f165
github.com/nats-io/nuid a5152d67cf63cbfb5d992a395458722a45194715
github.com/nsqio/go-nsq 0b80d6f05e15ca1930e0c5e1d540ed627e299980
github.com/nats-io/gnatsd 393bbb7c031433e68707c8810fda0bfcfbe6ab9b
github.com/nats-io/go-nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nats ea9585611a4ab58a205b9b125ebd74c389a6b898
github.com/nats-io/nuid 289cccf02c178dc782430d534e3c1f5b72af807f
github.com/nsqio/go-nsq eee57a3ac4174c55924125bb15eeeda8cffb6e6f
github.com/opencontainers/runc 89ab7f2ccc1e45ddf6485eaa802c35dcf321dfc8
github.com/prometheus/client_golang 18acf9993a863f4c4b40612e19cdd243e7c86831
github.com/opentracing-contrib/go-observer a52f2342449246d5bcc273e65cbdcfa5f7d6c63c
github.com/opentracing/opentracing-go 06f47b42c792fef2796e9681353e1d908c417827
github.com/openzipkin/zipkin-go-opentracing 1cafbdfde94fbf2b373534764e0863aa3bd0bf7b
github.com/pierrec/lz4 5c9560bfa9ace2bf86080bf40d46b34ae44604df
github.com/pierrec/xxHash 5a004441f897722c627870a981d02b29924215fa
github.com/pkg/errors 645ef00459ed84a119197bfb8d8205042c6df63d
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/prometheus/client_golang c317fb74746eac4fc65fe3909195f4cf67c5562a
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common e8eabff8812b05acf522b45fdcd725a785188e37
github.com/prometheus/procfs 406e5b7bfd8201a36e2bb5f7bdae0b03380c2ce8
github.com/samuel/go-zookeeper 218e9c81c0dd8b3b18172b2bbfad92cc7d6db55f
github.com/shirou/gopsutil 1516eb9ddc5e61ba58874047a98f8b44b5e585e8
github.com/soniah/gosnmp 3fe3beb30fa9700988893c56a63b1df8e1b68c26
github.com/streadway/amqp b4f3ceab0337f013208d31348b578d83c0064744
github.com/stretchr/testify 1f4a1643a57e798696635ea4c126e9127adb7d3c
github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2
github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4
github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160
github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c
github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693
github.com/satori/go.uuid 5bf94b69c6b68ee1b541973bb8e1144db23a194b
github.com/shirou/gopsutil a5c2888e464b14fa882c2a059e0f95716bd45cf1
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
github.com/Shopify/sarama 3b1b38866a79f06deddf0487d5c27ba0697ccd65
github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d
github.com/soniah/gosnmp f15472a4cd6f6ea7929e4c7d9f163c49f059924f
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6
github.com/stretchr/objx facf9a85c22f48d2f52f2380e4efce1768749a89
github.com/stretchr/testify 12b6f73e6084dad08a7c6e575284b177ecafbc71
github.com/tidwall/gjson 0623bd8fbdbf97cc62b98d15108832851a658e59
github.com/tidwall/match 173748da739a410c5b0b813b956f89ff94730b4c
github.com/vjeantet/grok d73e972b60935c7fec0b4ffbc904ed39ecaf7efe
github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee
github.com/wvanbergen/kazoo-go 0f768712ae6f76454f987c3356177e138df258f8
github.com/yuin/gopher-lua bf3808abd44b1e55143a2d7f08571aaa80db1808
github.com/wvanbergen/kazoo-go 968957352185472eacb69215fa3dbfcfdbac1096
github.com/yuin/gopher-lua 66c871e454fcf10251c61bf8eff02d0978cae75a
github.com/zensqlmonitor/go-mssqldb ffe5510c6fa5e15e6d983210ab501c815b56b363
golang.org/x/crypto c197bcf24cde29d3f73c7b4ac6fd41f4384e8af6
golang.org/x/net 6acef71eb69611914f7a30939ea9f6e194c78172
golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34
gopkg.in/dancannon/gorethink.v1 7d1af5be49cb5ecc7b177bf387d232050299d6ef
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
gopkg.in/mgo.v2 d90005c5262a3463800497ea5a89aed5fe22c886
gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4
golang.org/x/crypto dc137beb6cce2043eb6b5f223ab8bf51c32459f4
golang.org/x/net f2499483f923065a842d38eb4c7f1927e6fc6e6d
golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3
gopkg.in/asn1-ber.v1 4e86f4367175e39f69d9358a5f17b4dda270378d
gopkg.in/fatih/pool.v2 6e328e67893eb46323ad06f0e92cb9536babbabc
gopkg.in/gorethink/gorethink.v3 7ab832f7b65573104a555d84a27992ae9ea1f659
gopkg.in/ldap.v2 8168ee085ee43257585e50c6441aadf54ecb2c9f
gopkg.in/mgo.v2 3f83fa5005286a7fe593b055f0d7771a7dce4655
gopkg.in/olivere/elastic.v5 3113f9b9ad37509fe5f8a0e5e91c96fdc4435e26
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8
gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6

Godeps_windows

@@ -1,11 +0,0 @@
github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
github.com/StackExchange/wmi f3e2bae1e0cb5aef83e319133eabfee30013a4a5
github.com/go-ole/go-ole be49f7c07711fcb603cff39e1de7c67926dc0ba7
github.com/shirou/w32 3c9377fc6748f222729a8270fe2775d149a249ad
golang.org/x/sys a646d33e2ee3172a661fc09bca23bb4889a41bc8
github.com/go-ini/ini 9144852efba7c4daf409943ee90767da62d55438
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/pmezard/go-difflib/difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/stretchr/objx 1a9d0bb9f541897e62256577b352fdbc1fb4fd94
gopkg.in/fsnotify.v1 a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
gopkg.in/tomb.v1 dd632973f1e7218eb1089048e0798ec9ae7dceb8

Makefile

@@ -1,96 +1,95 @@
VERSION := $(shell sh -c 'git describe --always --tags')
BRANCH := $(shell sh -c 'git rev-parse --abbrev-ref HEAD')
COMMIT := $(shell sh -c 'git rev-parse --short HEAD')
PREFIX := /usr/local
VERSION := $(shell git describe --exact-match --tags 2>/dev/null)
BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
COMMIT := $(shell git rev-parse --short HEAD)
GOFILES ?= $(shell git ls-files '*.go')
GOFMT ?= $(shell gofmt -l $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)))
BUILDFLAGS ?=
ifdef GOBIN
PATH := $(GOBIN):$(PATH)
else
PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH)
endif
# Standard Telegraf build
default: prepare build
LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)
ifdef VERSION
LDFLAGS += -X main.version=$(VERSION)
endif
# Windows build
windows: prepare-windows build-windows
all:
$(MAKE) deps
$(MAKE) telegraf
# Only run the build (no dependency grabbing)
build:
go install -ldflags \
"-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" ./...
build-windows:
GOOS=windows GOARCH=amd64 go build -o telegraf.exe -ldflags \
"-X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
./cmd/telegraf/telegraf.go
build-for-docker:
CGO_ENABLED=0 GOOS=linux go build -installsuffix cgo -o telegraf -ldflags \
"-s -X main.version=$(VERSION) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH)" \
./cmd/telegraf/telegraf.go
# run package script
package:
./scripts/build.py --package --version="$(VERSION)" --platform=linux --arch=all --upload
# Get dependencies and use gdm to checkout changesets
prepare:
deps:
go get -u github.com/golang/lint/golint
go get github.com/sparrc/gdm
gdm restore
# Use the windows godeps file to prepare dependencies
prepare-windows:
go get github.com/sparrc/gdm
gdm restore
gdm restore -f Godeps_windows
telegraf:
go build -i -ldflags "$(LDFLAGS)" ./cmd/telegraf
# Run all docker containers necessary for unit tests
docker-run:
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
docker run --name kafka \
-e ADVERTISED_HOST=localhost \
-e ADVERTISED_PORT=9092 \
-p "2181:2181" -p "9092:9092" \
-d spotify/kafka
docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql
docker run --name memcached -p "11211:11211" -d memcached
docker run --name postgres -p "5432:5432" -d postgres
docker run --name rabbitmq -p "15672:15672" -p "5672:5672" -d rabbitmq:3-management
docker run --name redis -p "6379:6379" -d redis
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
docker run --name riemann -p "5555:5555" -d blalor/riemann
docker run --name nats -p "4222:4222" -d nats
go-install:
go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf
# Run docker containers necessary for CircleCI unit tests
docker-run-circle:
docker run --name aerospike -p "3000:3000" -d aerospike/aerospike-server:3.9.0
docker run --name kafka \
-e ADVERTISED_HOST=localhost \
-e ADVERTISED_PORT=9092 \
-p "2181:2181" -p "9092:9092" \
-d spotify/kafka
docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd
docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt
docker run --name riemann -p "5555:5555" -d blalor/riemann
docker run --name nats -p "4222:4222" -d nats
install: telegraf
mkdir -p $(DESTDIR)$(PREFIX)/bin/
cp $(TELEGRAF) $(DESTDIR)$(PREFIX)/bin/
# Kill all docker containers, ignore errors
docker-kill:
-docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats
-docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats
# Run full unit tests using docker containers (includes setup and teardown)
test: vet docker-kill docker-run
# Sleeping for kafka leadership election, TSDB setup, etc.
sleep 60
# SUCCESS, running tests
go test -race ./...
# Run "short" unit tests
test-short: vet
test:
go test -short ./...
vet:
go vet ./...
fmt:
@gofmt -w $(filter-out plugins/parsers/influx/machine.go, $(GOFILES))
.PHONY: test test-short vet build default
fmtcheck:
@echo '[INFO] running gofmt to identify incorrectly formatted code...'
@if [ ! -z "$(GOFMT)" ]; then \
echo "[ERROR] gofmt has found errors in the following files:" ; \
echo "$(GOFMT)" ; \
echo "" ;\
echo "Run make fmt to fix them." ; \
exit 1 ;\
fi
@echo '[INFO] done.'
test-windows:
go test ./plugins/inputs/ping/...
go test ./plugins/inputs/win_perf_counters/...
go test ./plugins/inputs/win_services/...
go test ./plugins/inputs/procstat/...
go test ./plugins/inputs/ntpq/...
# vet runs the Go source code static analysis tool `vet` to find
# any common errors.
vet:
@echo 'go vet $$(go list ./... | grep -v ./plugins/parsers/influx)'
@go vet $$(go list ./... | grep -v ./plugins/parsers/influx) ; if [ $$? -ne 0 ]; then \
echo ""; \
echo "go vet has found suspicious constructs. Please remediate any reported errors"; \
echo "to fix them before submitting code for review."; \
exit 1; \
fi
test-ci: fmtcheck vet
go test -short ./...
test-all: fmtcheck vet
go test ./...
package:
./scripts/build.py --package --platform=all --arch=all
clean:
rm -f telegraf
rm -f telegraf.exe
docker-image:
./scripts/build.py --package --platform=linux --arch=amd64
cp build/telegraf*$(COMMIT)*.deb .
docker build -f scripts/dev.docker --build-arg "package=telegraf*$(COMMIT)*.deb" -t "telegraf-dev:$(COMMIT)" .
plugins/parsers/influx/machine.go: plugins/parsers/influx/machine.go.rl
ragel -Z -G2 $^ -o $@
.PHONY: deps telegraf install test test-windows lint vet test-all package clean docker-image fmtcheck uint64

README.md

@@ -5,8 +5,7 @@ and writing metrics.
Design goals are to have a minimal memory footprint with a plugin system so
that developers in the community can easily add support for collecting metrics
from well known services (like Hadoop, Postgres, or Redis) and third party
APIs (like Mailchimp, AWS CloudWatch, or Google Analytics).
from local or remote services.
Telegraf is plugin-driven and has the concept of 4 distinct plugins:
@@ -20,113 +19,101 @@ For more information on Processor and Aggregator plugins please [read this](./do
New plugins are designed to be easy to contribute; we'll eagerly accept pull
requests and will manage the set of plugins that Telegraf supports.
See the [contributing guide](CONTRIBUTING.md) for instructions on writing
new plugins.
## Contributing
There are many ways to contribute:
- Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new)
- [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation)
- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls)
- Answer questions on github and on the [Community Site](https://community.influxdata.com/)
- [Contribute plugins](CONTRIBUTING.md)
## Installation:
### Linux deb and rpm Packages:
Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf_1.1.1_amd64.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1.x86_64.rpm
Latest (arm):
* https://dl.influxdata.com/telegraf/releases/telegraf_1.1.1_armhf.deb
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1.armhf.rpm
##### Package Instructions:
* Telegraf binary is installed in `/usr/bin/telegraf`
* Telegraf daemon configuration file is in `/etc/telegraf/telegraf.conf`
* On sysv systems, the telegraf daemon can be controlled via
`service telegraf [action]`
* On systemd systems (such as Ubuntu 15+), the telegraf daemon can be
controlled via `systemctl [action] telegraf`
### yum/apt Repositories:
There is a yum/apt repo available for the whole InfluxData stack, see
[here](https://docs.influxdata.com/influxdb/latest/introduction/installation/#installation)
for instructions on setting up the repo. Once it is configured, you will be able
to use this repo to install & update telegraf.
### Linux tarballs:
Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_linux_amd64.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_linux_i386.tar.gz
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_linux_armhf.tar.gz
### FreeBSD tarball:
Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_freebsd_amd64.tar.gz
You can download the binaries directly from the [downloads](https://www.influxdata.com/downloads) page
or from the [releases](https://github.com/influxdata/telegraf/releases) section.
### Ansible Role:
Ansible role: https://github.com/rossmcdonald/telegraf
### OSX via Homebrew:
```
brew update
brew install telegraf
```
### Windows Binaries (EXPERIMENTAL)
Latest:
* https://dl.influxdata.com/telegraf/releases/telegraf-1.1.1_windows_amd64.zip
### From Source:
Telegraf manages dependencies via [gdm](https://github.com/sparrc/gdm),
which gets installed via the Makefile
if you don't have it already. You also must build with golang version 1.5+.
Telegraf requires golang version 1.8+; the Makefile requires GNU make.
Dependencies are managed with [gdm](https://github.com/sparrc/gdm),
which is installed by the Makefile if you don't have it already.
1. [Install Go](https://golang.org/doc/install)
2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH)
3. Run `go get github.com/influxdata/telegraf`
3. Run `go get -d github.com/influxdata/telegraf`
4. Run `cd $GOPATH/src/github.com/influxdata/telegraf`
5. Run `make`
### Nightly Builds
These builds are generated from the master branch:
- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb)
- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb)
- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm)
- [telegraf_nightly_armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb)
- [telegraf-nightly.armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm)
- [telegraf_nightly_armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb)
- [telegraf-nightly.armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm)
- [telegraf-nightly_freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz)
- [telegraf-nightly_freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz)
- [telegraf_nightly_i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb)
- [telegraf-nightly.i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm)
- [telegraf-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz)
- [telegraf-nightly_linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz)
- [telegraf-nightly_linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz)
- [telegraf-nightly_linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz)
- [telegraf-nightly_linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz)
- [telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz)
- [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb)
- [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm)
- [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip)
- [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip)
- [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm)
- [telegraf-static-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-static-nightly_linux_amd64.tar.gz)
## How to use it:
See usage with:
```
telegraf --help
./telegraf --help
```
### Generate a telegraf config file:
#### Generate a telegraf config file:
```
telegraf config > telegraf.conf
./telegraf config > telegraf.conf
```
### Generate config with only cpu input & influxdb output plugins defined
#### Generate config with only cpu input & influxdb output plugins defined:
```
telegraf --input-filter cpu --output-filter influxdb config
./telegraf --input-filter cpu --output-filter influxdb config
```
### Run a single telegraf collection, outputing metrics to stdout
#### Run a single telegraf collection, outputting metrics to stdout:
```
telegraf --config telegraf.conf -test
./telegraf --config telegraf.conf --test
```
### Run telegraf with all plugins defined in config file
#### Run telegraf with all plugins defined in config file:
```
telegraf --config telegraf.conf
./telegraf --config telegraf.conf
```
### Run telegraf, enabling the cpu & memory input, and influxdb output plugins
#### Run telegraf, enabling the cpu & memory input, and influxdb output plugins:
```
telegraf --config telegraf.conf -input-filter cpu:mem -output-filter influxdb
./telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
```
@@ -137,68 +124,99 @@ configuration options.
## Input Plugins
* [aws cloudwatch](./plugins/inputs/cloudwatch)
* [aerospike](./plugins/inputs/aerospike)
* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq)
* [apache](./plugins/inputs/apache)
* [aws cloudwatch](./plugins/inputs/cloudwatch)
* [bcache](./plugins/inputs/bcache)
* [cassandra](./plugins/inputs/cassandra)
* [bond](./plugins/inputs/bond)
* [cassandra](./plugins/inputs/cassandra) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
* [ceph](./plugins/inputs/ceph)
* [cgroup](./plugins/inputs/cgroup)
* [chrony](./plugins/inputs/chrony)
* [consul](./plugins/inputs/consul)
* [conntrack](./plugins/inputs/conntrack)
* [couchbase](./plugins/inputs/couchbase)
* [couchdb](./plugins/inputs/couchdb)
* [DC/OS](./plugins/inputs/dcos)
* [disque](./plugins/inputs/disque)
* [dmcache](./plugins/inputs/dmcache)
* [dns query time](./plugins/inputs/dns_query)
* [docker](./plugins/inputs/docker)
* [dovecot](./plugins/inputs/dovecot)
* [elasticsearch](./plugins/inputs/elasticsearch)
* [exec](./plugins/inputs/exec) (generic executable plugin, supports JSON, influx, graphite and nagios)
* [fail2ban](./plugins/inputs/fail2ban)
* [fibaro](./plugins/inputs/fibaro)
* [filestat](./plugins/inputs/filestat)
* [fluentd](./plugins/inputs/fluentd)
* [graylog](./plugins/inputs/graylog)
* [haproxy](./plugins/inputs/haproxy)
* [hddtemp](./plugins/inputs/hddtemp)
* [http](./plugins/inputs/http) (generic HTTP plugin, supports using input data formats)
* [http_response](./plugins/inputs/http_response)
* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin)
* [internal](./plugins/inputs/internal)
* [influxdb](./plugins/inputs/influxdb)
* [interrupts](./plugins/inputs/interrupts)
* [ipmi_sensor](./plugins/inputs/ipmi_sensor)
* [iptables](./plugins/inputs/iptables)
* [jolokia](./plugins/inputs/jolokia)
* [ipset](./plugins/inputs/ipset)
* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
* [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka)
* [kapacitor](./plugins/inputs/kapacitor)
* [kubernetes](./plugins/inputs/kubernetes)
* [leofs](./plugins/inputs/leofs)
* [lustre2](./plugins/inputs/lustre2)
* [mailchimp](./plugins/inputs/mailchimp)
* [mcrouter](./plugins/inputs/mcrouter)
* [memcached](./plugins/inputs/memcached)
* [mesos](./plugins/inputs/mesos)
* [minecraft](./plugins/inputs/minecraft)
* [mongodb](./plugins/inputs/mongodb)
* [mysql](./plugins/inputs/mysql)
* [nats](./plugins/inputs/nats)
* [net_response](./plugins/inputs/net_response)
* [nginx](./plugins/inputs/nginx)
* [nginx_plus](./plugins/inputs/nginx_plus)
* [nsq](./plugins/inputs/nsq)
* [nstat](./plugins/inputs/nstat)
* [ntpq](./plugins/inputs/ntpq)
* [nvidia_smi](./plugins/inputs/nvidia_smi)
* [openldap](./plugins/inputs/openldap)
* [opensmtpd](./plugins/inputs/opensmtpd)
* [pf](./plugins/inputs/pf)
* [phpfpm](./plugins/inputs/phpfpm)
* [phusion passenger](./plugins/inputs/passenger)
* [ping](./plugins/inputs/ping)
* [postgresql](./plugins/inputs/postgresql)
* [postfix](./plugins/inputs/postfix)
* [postgresql_extensible](./plugins/inputs/postgresql_extensible)
* [postgresql](./plugins/inputs/postgresql)
* [powerdns](./plugins/inputs/powerdns)
* [procstat](./plugins/inputs/procstat)
* [prometheus](./plugins/inputs/prometheus)
* [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server))
* [puppetagent](./plugins/inputs/puppetagent)
* [rabbitmq](./plugins/inputs/rabbitmq)
* [raindrops](./plugins/inputs/raindrops)
* [redis](./plugins/inputs/redis)
* [rethinkdb](./plugins/inputs/rethinkdb)
* [riak](./plugins/inputs/riak)
* [salesforce](./plugins/inputs/salesforce)
* [sensors](./plugins/inputs/sensors)
* [smart](./plugins/inputs/smart)
* [snmp](./plugins/inputs/snmp)
* [snmp_legacy](./plugins/inputs/snmp_legacy)
* [solr](./plugins/inputs/solr)
* [sql server](./plugins/inputs/sqlserver) (microsoft)
* [teamspeak](./plugins/inputs/teamspeak)
* [tomcat](./plugins/inputs/tomcat)
* [twemproxy](./plugins/inputs/twemproxy)
* [unbound](./plugins/inputs/unbound)
* [varnish](./plugins/inputs/varnish)
* [zfs](./plugins/inputs/zfs)
* [zookeeper](./plugins/inputs/zookeeper)
* [win_perf_counters ](./plugins/inputs/win_perf_counters) (windows performance counters)
* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters)
* [win_services](./plugins/inputs/win_services)
* [sysstat](./plugins/inputs/sysstat)
* [system](./plugins/inputs/system)
* cpu
@@ -211,6 +229,7 @@ configuration options.
* processes
* kernel (/proc/stat)
* kernel (/proc/vmstat)
* linux_sysctl_fs (/proc/sys/fs)
Telegraf can also collect metrics via the following service plugins:
@@ -221,32 +240,52 @@ Telegraf can also collect metrics via the following service plugins:
* [nsq_consumer](./plugins/inputs/nsq_consumer)
* [logparser](./plugins/inputs/logparser)
* [statsd](./plugins/inputs/statsd)
* [socket_listener](./plugins/inputs/socket_listener)
* [tail](./plugins/inputs/tail)
* [tcp_listener](./plugins/inputs/tcp_listener)
* [udp_listener](./plugins/inputs/udp_listener)
* [tcp_listener](./plugins/inputs/socket_listener)
* [udp_listener](./plugins/inputs/socket_listener)
* [webhooks](./plugins/inputs/webhooks)
* [filestack](./plugins/inputs/webhooks/filestack)
* [github](./plugins/inputs/webhooks/github)
* [mandrill](./plugins/inputs/webhooks/mandrill)
* [papertrail](./plugins/inputs/webhooks/papertrail)
* [particle](./plugins/inputs/webhooks/particle)
* [rollbar](./plugins/inputs/webhooks/rollbar)
* [zipkin](./plugins/inputs/zipkin)
Telegraf is able to parse the following input data formats into metrics; these
formats may be used with input plugins supporting the `data_format` option:
* [InfluxDB Line Protocol](./docs/DATA_FORMATS_INPUT.md#influx)
* [JSON](./docs/DATA_FORMATS_INPUT.md#json)
* [Graphite](./docs/DATA_FORMATS_INPUT.md#graphite)
* [Value](./docs/DATA_FORMATS_INPUT.md#value)
* [Nagios](./docs/DATA_FORMATS_INPUT.md#nagios)
* [Collectd](./docs/DATA_FORMATS_INPUT.md#collectd)
* [Dropwizard](./docs/DATA_FORMATS_INPUT.md#dropwizard)
## Processor Plugins
* [printer](./plugins/processors/printer)
* [override](./plugins/processors/override)
## Aggregator Plugins
* [basicstats](./plugins/aggregators/basicstats)
* [minmax](./plugins/aggregators/minmax)
* [histogram](./plugins/aggregators/histogram)
## Output Plugins
* [influxdb](./plugins/outputs/influxdb)
* [amon](./plugins/outputs/amon)
* [amqp](./plugins/outputs/amqp)
* [amqp](./plugins/outputs/amqp) (rabbitmq)
* [aws kinesis](./plugins/outputs/kinesis)
* [aws cloudwatch](./plugins/outputs/cloudwatch)
* [cratedb](./plugins/outputs/cratedb)
* [datadog](./plugins/outputs/datadog)
* [discard](./plugins/outputs/discard)
* [elasticsearch](./plugins/outputs/elasticsearch)
* [file](./plugins/outputs/file)
* [graphite](./plugins/outputs/graphite)
* [graylog](./plugins/outputs/graylog)
@@ -259,9 +298,8 @@ Telegraf can also collect metrics via the following service plugins:
* [opentsdb](./plugins/outputs/opentsdb)
* [prometheus](./plugins/outputs/prometheus_client)
* [riemann](./plugins/outputs/riemann)
## Contributing
Please see the
[contributing guide](CONTRIBUTING.md)
for details on contributing a plugin to Telegraf.
* [riemann_legacy](./plugins/outputs/riemann_legacy)
* [socket_writer](./plugins/outputs/socket_writer)
* [tcp](./plugins/outputs/socket_writer)
* [udp](./plugins/outputs/socket_writer)
* [wavefront](./plugins/outputs/wavefront)

accumulator.go

@@ -28,6 +28,18 @@ type Accumulator interface {
tags map[string]string,
t ...time.Time)
// AddSummary is the same as AddFields, but will add the metric as a "Summary" type
AddSummary(measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time)
// AddHistogram is the same as AddFields, but will add the metric as a "Histogram" type
AddHistogram(measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time)
SetPrecision(precision, interval time.Duration)
AddError(err error)
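Callers use the new methods exactly like `AddFields`; only the metric's value type differs. A hedged usage sketch (the measurement names, fields, and tags here are invented):

```go
acc.AddHistogram("http_request_duration_seconds",
	map[string]interface{}{"sum": 12.7, "count": 42},
	map[string]string{"handler": "/api"},
)

acc.AddSummary("rpc_latency_seconds",
	map[string]interface{}{"sum": 3.4, "count": 17},
	map[string]string{"method": "Get"},
)
```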

agent/accumulator.go

@@ -26,7 +26,7 @@ type MetricMaker interface {
func NewAccumulator(
maker MetricMaker,
metrics chan telegraf.Metric,
) *accumulator {
) telegraf.Accumulator {
acc := accumulator{
maker: maker,
metrics: metrics,
@@ -76,6 +76,28 @@ func (ac *accumulator) AddCounter(
}
}
func (ac *accumulator) AddSummary(
measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time,
) {
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Summary, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
func (ac *accumulator) AddHistogram(
measurement string,
fields map[string]interface{},
tags map[string]string,
t ...time.Time,
) {
if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Histogram, ac.getTime(t)); m != nil {
ac.metrics <- m
}
}
// AddError passes a runtime error to the accumulator.
// The error will be tagged with the plugin name and written to the log.
func (ac *accumulator) AddError(err error) {

agent/accumulator_test.go

@@ -15,63 +15,36 @@ import (
"github.com/stretchr/testify/require"
)
func TestAdd(t *testing.T) {
now := time.Now()
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
actual)
}
func TestAddFields(t *testing.T) {
now := time.Now()
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
tags := map[string]string{"foo": "bar"}
fields := map[string]interface{}{
"usage": float64(99),
}
a.AddFields("acctest", fields, map[string]string{})
a.AddGauge("acctest", fields, map[string]string{"acc": "test"})
a.AddCounter("acctest", fields, map[string]string{"acc": "test"}, now)
now := time.Now()
a.AddCounter("acctest", fields, tags, now)
testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest usage=99")
testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test usage=99")
require.Equal(t, "acctest", testm.Name())
actual, ok := testm.GetField("usage")
testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test usage=99 %d\n", now.UnixNano()),
actual)
require.True(t, ok)
require.Equal(t, float64(99), actual)
actual, ok = testm.GetTag("foo")
require.True(t, ok)
require.Equal(t, "bar", actual)
tm := testm.Time()
// okay if monotonic clock differs
require.True(t, now.Equal(tm))
tp := testm.Type()
require.Equal(t, telegraf.Counter, tp)
}
func TestAccAddError(t *testing.T) {
@@ -98,215 +71,61 @@ func TestAccAddError(t *testing.T) {
assert.Contains(t, string(errs[2]), "baz")
}
func TestAddNoIntervalWithPrecision(t *testing.T) {
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.SetPrecision(0, time.Second)
func TestSetPrecision(t *testing.T) {
tests := []struct {
name string
unset bool
precision time.Duration
interval time.Duration
timestamp time.Time
expected time.Time
}{
{
name: "default precision is nanosecond",
unset: true,
timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
expected: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
},
{
name: "second interval",
interval: time.Second,
timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
expected: time.Date(2006, time.February, 10, 12, 0, 0, 0, time.UTC),
},
{
name: "microsecond interval",
interval: time.Microsecond,
timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC),
expected: time.Date(2006, time.February, 10, 12, 0, 0, 82913000, time.UTC),
},
{
name: "2 second precision",
precision: 2 * time.Second,
timestamp: time.Date(2006, time.February, 10, 12, 0, 2, 4, time.UTC),
expected: time.Date(2006, time.February, 10, 12, 0, 2, 0, time.UTC),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
metrics := make(chan telegraf.Metric, 10)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
a := NewAccumulator(&TestMetricMaker{}, metrics)
if !tt.unset {
a.SetPrecision(tt.precision, tt.interval)
}
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{},
tt.timestamp,
)
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
testm := <-metrics
require.Equal(t, tt.expected, testm.Time())
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
actual)
}
func TestAddDisablePrecision(t *testing.T) {
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.SetPrecision(time.Nanosecond, 0)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
actual)
}
func TestAddNoPrecisionWithInterval(t *testing.T) {
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.SetPrecision(0, time.Second)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
testm = <-a.metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
actual)
}
func TestDifferentPrecisions(t *testing.T) {
now := time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC)
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.SetPrecision(0, time.Second)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-a.metrics
actual := testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800000000000)),
actual)
a.SetPrecision(0, time.Millisecond)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800083000000)),
actual)
a.SetPrecision(0, time.Microsecond)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082913000)),
actual)
a.SetPrecision(0, time.Nanosecond)
a.AddFields("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm = <-a.metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", int64(1139572800082912748)),
actual)
}
func TestAddGauge(t *testing.T) {
now := time.Now()
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.AddGauge("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddGauge("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddGauge("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
assert.Equal(t, testm.Type(), telegraf.Gauge)
testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
assert.Equal(t, testm.Type(), telegraf.Gauge)
testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
actual)
assert.Equal(t, testm.Type(), telegraf.Gauge)
}
func TestAddCounter(t *testing.T) {
now := time.Now()
metrics := make(chan telegraf.Metric, 10)
defer close(metrics)
a := NewAccumulator(&TestMetricMaker{}, metrics)
a.AddCounter("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{})
a.AddCounter("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"})
a.AddCounter("acctest",
map[string]interface{}{"value": float64(101)},
map[string]string{"acc": "test"}, now)
testm := <-metrics
actual := testm.String()
assert.Contains(t, actual, "acctest value=101")
assert.Equal(t, testm.Type(), telegraf.Counter)
testm = <-metrics
actual = testm.String()
assert.Contains(t, actual, "acctest,acc=test value=101")
assert.Equal(t, testm.Type(), telegraf.Counter)
testm = <-metrics
actual = testm.String()
assert.Equal(t,
fmt.Sprintf("acctest,acc=test value=101 %d\n", now.UnixNano()),
actual)
assert.Equal(t, testm.Type(), telegraf.Counter)
close(metrics)
})
}
}
type TestMetricMaker struct {

agent/agent.go

@@ -143,7 +143,7 @@ func (a *Agent) gatherer(
func gatherWithTimeout(
shutdown chan struct{},
input *models.RunningInput,
acc *accumulator,
acc telegraf.Accumulator,
timeout time.Duration,
) {
ticker := time.NewTicker(timeout)
@@ -157,13 +157,13 @@ func gatherWithTimeout(
select {
case err := <-done:
if err != nil {
log.Printf("E! ERROR in input [%s]: %s", input.Name(), err)
acc.AddError(err)
}
return
case <-ticker.C:
log.Printf("E! ERROR: input [%s] took longer to collect than "+
"collection interval (%s)",
input.Name(), timeout)
err := fmt.Errorf("took longer to collect than collection interval (%s)",
timeout)
acc.AddError(err)
continue
case <-shutdown:
return
@@ -191,17 +191,18 @@ func (a *Agent) Test() error {
}()
for _, input := range a.Config.Inputs {
if _, ok := input.Input.(telegraf.ServiceInput); ok {
fmt.Printf("\nWARNING: skipping plugin [[%s]]: service inputs not supported in --test mode\n",
input.Name())
continue
}
acc := NewAccumulator(input, metricC)
acc.SetPrecision(a.Config.Agent.Precision.Duration,
a.Config.Agent.Interval.Duration)
input.SetTrace(true)
input.SetDefaultTags(a.Config.Tags)
fmt.Printf("* Plugin: %s, Collection 1\n", input.Name())
if input.Config.Interval != 0 {
fmt.Printf("* Internal: %s\n", input.Config.Interval)
}
if err := input.Input.Gather(acc); err != nil {
return err
}
@@ -209,9 +210,8 @@ func (a *Agent) Test() error {
// Special instructions for some inputs. cpu, for example, needs to be
// run twice in order to return cpu usage percentages.
switch input.Name() {
case "cpu", "mongodb", "procstat":
case "inputs.cpu", "inputs.mongodb", "inputs.procstat":
time.Sleep(500 * time.Millisecond)
fmt.Printf("* Plugin: %s, Collection 2\n", input.Name())
if err := input.Input.Gather(acc); err != nil {
return err
}
@@ -241,12 +241,12 @@ func (a *Agent) flush() {
}
// flusher monitors the metrics input channel and flushes on the minimum interval
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) error {
func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, aggC chan telegraf.Metric) error {
// Inelegant, but this sleep is to allow the Gather threads to run, so that
// the flusher will flush after metrics are collected.
time.Sleep(time.Millisecond * 300)
// create an output metric channel and a gorouting that continously passes
// create an output metric channel and a goroutine that continuously passes
// each metric onto the output plugins & aggregators.
outMetricC := make(chan telegraf.Metric, 100)
var wg sync.WaitGroup
@@ -265,11 +265,9 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
// if dropOriginal is set to true, then we will only send this
// metric to the aggregators, not the outputs.
var dropOriginal bool
if !m.IsAggregate() {
for _, agg := range a.Config.Aggregators {
if ok := agg.Add(m.Copy()); ok {
dropOriginal = true
}
for _, agg := range a.Config.Aggregators {
if ok := agg.Add(m.Copy()); ok {
dropOriginal = true
}
}
if !dropOriginal {
@@ -285,7 +283,37 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
}
}()
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case <-shutdown:
if len(aggC) > 0 {
// keep going until aggC is flushed
continue
}
return
case metric := <-aggC:
metrics := []telegraf.Metric{metric}
for _, processor := range a.Config.Processors {
metrics = processor.Apply(metrics...)
}
for _, m := range metrics {
for i, o := range a.Config.Outputs {
if i == len(a.Config.Outputs)-1 {
o.AddMetric(m)
} else {
o.AddMetric(m.Copy())
}
}
}
}
}
}()
ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration)
semaphore := make(chan struct{}, 1)
for {
select {
case <-shutdown:
@@ -295,8 +323,18 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) er
a.flush()
return nil
case <-ticker.C:
internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown)
a.flush()
go func() {
select {
case semaphore <- struct{}{}:
internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown)
a.flush()
<-semaphore
default:
// skipping this flush because one is already happening
log.Println("W! Skipping a scheduled flush because there is" +
" already a flush ongoing.")
}
}()
case metric := <-metricC:
// NOTE potential bottleneck here as we put each metric through the
// processors serially.
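The flush scheduling above uses a buffered channel of capacity one as a try-lock: a tick either claims the slot and flushes in the background, or is dropped with a warning rather than queued behind a slow flush. A standalone sketch of the idiom (the function name and imports are illustrative):

```go
func flushLoop(interval time.Duration, flush func()) {
	sem := make(chan struct{}, 1)
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for range ticker.C {
		select {
		case sem <- struct{}{}:
			go func() {
				defer func() { <-sem }()
				flush()
			}()
		default:
			// A flush is still running; skip this tick instead of piling up.
			log.Println("W! Skipping a scheduled flush; previous flush ongoing")
		}
	}
}
```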
@@ -322,6 +360,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
// channel shared between all input threads for accumulating metrics
metricC := make(chan telegraf.Metric, 100)
aggC := make(chan telegraf.Metric, 100)
// Start all ServicePlugins
for _, input := range a.Config.Inputs {
@@ -350,7 +389,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
wg.Add(1)
go func() {
defer wg.Done()
if err := a.flusher(shutdown, metricC); err != nil {
if err := a.flusher(shutdown, metricC, aggC); err != nil {
log.Printf("E! Flusher routine failed, exiting: %s\n", err.Error())
close(shutdown)
}
@@ -360,7 +399,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
for _, aggregator := range a.Config.Aggregators {
go func(agg *models.RunningAggregator) {
defer wg.Done()
acc := NewAccumulator(agg, metricC)
acc := NewAccumulator(agg, aggC)
acc.SetPrecision(a.Config.Agent.Precision.Duration,
a.Config.Agent.Interval.Duration)
agg.Run(acc, shutdown)
@@ -381,5 +420,6 @@ func (a *Agent) Run(shutdown chan struct{}) error {
}
wg.Wait()
a.Close()
return nil
}

appveyor.yml (new file)

@@ -0,0 +1,34 @@
image: Previous Visual Studio 2015
version: "{build}"
cache:
- C:\Cache
clone_folder: C:\gopath\src\github.com\influxdata\telegraf
environment:
GOPATH: C:\gopath
platform: x64
install:
- IF NOT EXIST "C:\Cache" mkdir C:\Cache
- IF NOT EXIST "C:\Cache\go1.10.1.msi" curl -o "C:\Cache\go1.10.1.msi" https://storage.googleapis.com/golang/go1.10.1.windows-amd64.msi
- IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip
- IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip
- IF EXIST "C:\Go" rmdir /S /Q C:\Go
- msiexec.exe /i "C:\Cache\go1.10.1.msi" /quiet
- 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y
- 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y
- go version
- go env
build_script:
- cmd: C:\GnuWin32\bin\make deps
- cmd: C:\GnuWin32\bin\make telegraf
test_script:
- cmd: C:\GnuWin32\bin\make test-windows
artifacts:
- path: telegraf.exe

circle.yml

@@ -1,18 +0,0 @@
machine:
services:
- docker
post:
- sudo service zookeeper stop
- go version
- go version | grep 1.7.4 || sudo rm -rf /usr/local/go
- wget https://storage.googleapis.com/golang/go1.7.4.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.7.4.linux-amd64.tar.gz
- go version
dependencies:
override:
- docker info
test:
override:
- bash scripts/circle-test.sh

cmd/telegraf/telegraf.go

@@ -4,6 +4,8 @@ import (
"flag"
"fmt"
"log"
"net/http"
_ "net/http/pprof" // Comment this line to disable pprof endpoint.
"os"
"os/signal"
"runtime"
@@ -24,6 +26,8 @@ import (
var fDebug = flag.Bool("debug", false,
"turn on debug logging")
var pprofAddr = flag.String("pprof-addr", "",
"pprof address to listen on, not activate pprof if empty")
var fQuiet = flag.Bool("quiet", false,
"run in quiet mode")
var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit")
@@ -47,16 +51,16 @@ var fAggregatorFilters = flag.String("aggregator-filter", "",
var fProcessorFilters = flag.String("processor-filter", "",
"filter the processors to enable, separator is :")
var fUsage = flag.String("usage", "",
"print usage for a plugin, ie, 'telegraf -usage mysql'")
"print usage for a plugin, ie, 'telegraf --usage mysql'")
var fService = flag.String("service", "",
"operate on the service")
var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)")
// Telegraf version, populated linker.
// ie, -ldflags "-X main.version=`git describe --always --tags`"
var (
version string
commit string
branch string
nextVersion = "1.7.0"
version string
commit string
branch string
)
func init() {
@@ -69,134 +73,19 @@ func init() {
}
}
const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.
Usage:
telegraf [commands|flags]
The commands & flags are:
config print out full sample configuration to stdout
version print the version to stdout
--config <file> configuration file to load
--test gather metrics once, print them to stdout, and exit
--config-directory directory containing additional *.conf files
--input-filter filter the input plugins to enable, separator is :
--output-filter filter the output plugins to enable, separator is :
--usage print usage for a plugin, ie, 'telegraf --usage mysql'
--debug print metrics as they're generated to stdout
--quiet run in quiet mode
Examples:
# generate a telegraf config file:
telegraf config > telegraf.conf
# generate config with only cpu input & influxdb output plugins defined
telegraf --input-filter cpu --output-filter influxdb config
# run a single telegraf collection, outputing metrics to stdout
telegraf --config telegraf.conf -test
# run telegraf with all plugins defined in config file
telegraf --config telegraf.conf
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
`
var stop chan struct{}
var srvc service.Service
type program struct{}
func reloadLoop(stop chan struct{}, s service.Service) {
defer func() {
if service.Interactive() {
os.Exit(0)
}
return
}()
func reloadLoop(
stop chan struct{},
inputFilters []string,
outputFilters []string,
aggregatorFilters []string,
processorFilters []string,
) {
reload := make(chan bool, 1)
reload <- true
for <-reload {
reload <- false
flag.Parse()
args := flag.Args()
var inputFilters []string
if *fInputFilters != "" {
inputFilter := strings.TrimSpace(*fInputFilters)
inputFilters = strings.Split(":"+inputFilter+":", ":")
}
var outputFilters []string
if *fOutputFilters != "" {
outputFilter := strings.TrimSpace(*fOutputFilters)
outputFilters = strings.Split(":"+outputFilter+":", ":")
}
var aggregatorFilters []string
if *fAggregatorFilters != "" {
aggregatorFilter := strings.TrimSpace(*fAggregatorFilters)
aggregatorFilters = strings.Split(":"+aggregatorFilter+":", ":")
}
var processorFilters []string
if *fProcessorFilters != "" {
processorFilter := strings.TrimSpace(*fProcessorFilters)
processorFilters = strings.Split(":"+processorFilter+":", ":")
}
if len(args) > 0 {
switch args[0] {
case "version":
fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
return
case "config":
config.PrintSampleConfig(
inputFilters,
outputFilters,
aggregatorFilters,
processorFilters,
)
return
}
}
// switch for flags which just do something and exit immediately
switch {
case *fOutputList:
fmt.Println("Available Output Plugins:")
for k, _ := range outputs.Outputs {
fmt.Printf(" %s\n", k)
}
return
case *fInputList:
fmt.Println("Available Input Plugins:")
for k, _ := range inputs.Inputs {
fmt.Printf(" %s\n", k)
}
return
case *fVersion:
fmt.Printf("Telegraf v%s (git: %s %s)\n", version, branch, commit)
return
case *fSampleConfig:
config.PrintSampleConfig(
inputFilters,
outputFilters,
aggregatorFilters,
processorFilters,
)
return
case *fUsage != "":
if err := config.PrintInputConfig(*fUsage); err != nil {
if err2 := config.PrintOutputConfig(*fUsage); err2 != nil {
log.Fatalf("E! %s and %s", err, err2)
}
}
return
}
// If no other options are specified, load the config file and run.
c := config.NewConfig()
@@ -213,13 +102,23 @@ func reloadLoop(stop chan struct{}, s service.Service) {
log.Fatal("E! " + err.Error())
}
}
if len(c.Outputs) == 0 {
if !*fTest && len(c.Outputs) == 0 {
log.Fatalf("E! Error: no outputs found, did you provide a valid config file?")
}
if len(c.Inputs) == 0 {
log.Fatalf("E! Error: no inputs found, did you provide a valid config file?")
}
if int64(c.Agent.Interval.Duration) <= 0 {
log.Fatalf("E! Agent interval must be positive, found %s",
c.Agent.Interval.Duration)
}
if int64(c.Agent.FlushInterval.Duration) <= 0 {
log.Fatalf("E! Agent flush_interval must be positive; found %s",
c.Agent.Interval.Duration)
}
ag, err := agent.NewAgent(c)
if err != nil {
log.Fatal("E! " + err.Error())
@@ -237,7 +136,7 @@ func reloadLoop(stop chan struct{}, s service.Service) {
if err != nil {
log.Fatal("E! " + err.Error())
}
return
os.Exit(0)
}
err = ag.Connect()
@@ -265,20 +164,27 @@ func reloadLoop(stop chan struct{}, s service.Service) {
}
}()
log.Printf("I! Starting Telegraf (version %s)\n", version)
log.Printf("I! Starting Telegraf %s\n", displayVersion())
log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " "))
log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " "))
log.Printf("I! Tags enabled: %s", c.ListTags())
if *fPidfile != "" {
f, err := os.Create(*fPidfile)
f, err := os.OpenFile(*fPidfile, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
log.Fatalf("E! Unable to create pidfile: %s", err)
log.Printf("E! Unable to create pidfile: %s", err)
} else {
fmt.Fprintf(f, "%d\n", os.Getpid())
f.Close()
defer func() {
err := os.Remove(*fPidfile)
if err != nil {
log.Printf("E! Unable to remove pidfile: %s", err)
}
}()
}
fmt.Fprintf(f, "%d\n", os.Getpid())
f.Close()
}
ag.Run(shutdown)
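The loop above uses a buffered channel of size one as a restart flag: the loop keeps running while the last value received is `true`, and a reload is requested by draining the `false` and re-arming the channel with `true`. A stripped-down sketch of the idiom (the reload trigger here is hypothetical; in Telegraf it comes from the signal handler):

```go
package main

import "fmt"

func main() {
	reload := make(chan bool, 1)
	reload <- true
	runs := 0
	for <-reload {
		reload <- false // assume no reload unless one is requested below
		runs++
		// ... load config and run the agent until shutdown ...
		if runs < 3 {
			// A reload request (SIGHUP in the real code) re-arms the
			// flag so the loop body executes again.
			<-reload
			reload <- true
		}
	}
	fmt.Println("runs:", runs) // runs: 3
}
```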
@@ -290,47 +196,160 @@ func usageExit(rc int) {
os.Exit(rc)
}
type program struct {
inputFilters []string
outputFilters []string
aggregatorFilters []string
processorFilters []string
}
func (p *program) Start(s service.Service) error {
srvc = s
go p.run()
return nil
}
func (p *program) run() {
stop = make(chan struct{})
reloadLoop(stop, srvc)
reloadLoop(
stop,
p.inputFilters,
p.outputFilters,
p.aggregatorFilters,
p.processorFilters,
)
}
func (p *program) Stop(s service.Service) error {
close(stop)
return nil
}
func displayVersion() string {
if version == "" {
return fmt.Sprintf("v%s~%s", nextVersion, commit)
}
return "v" + version
}
func main() {
flag.Usage = func() { usageExit(0) }
flag.Parse()
if runtime.GOOS == "windows" {
args := flag.Args()
inputFilters, outputFilters := []string{}, []string{}
if *fInputFilters != "" {
inputFilters = strings.Split(":"+strings.TrimSpace(*fInputFilters)+":", ":")
}
if *fOutputFilters != "" {
outputFilters = strings.Split(":"+strings.TrimSpace(*fOutputFilters)+":", ":")
}
aggregatorFilters, processorFilters := []string{}, []string{}
if *fAggregatorFilters != "" {
aggregatorFilters = strings.Split(":"+strings.TrimSpace(*fAggregatorFilters)+":", ":")
}
if *fProcessorFilters != "" {
processorFilters = strings.Split(":"+strings.TrimSpace(*fProcessorFilters)+":", ":")
}
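For readers unfamiliar with the idiom, the wrapped split above produces empty boundary entries in addition to the plugin names, which are presumably harmless to the downstream name matching. A minimal illustration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mirrors the flag handling above for --input-filter cpu:mem.
	fmt.Printf("%q\n", strings.Split(":"+"cpu:mem"+":", ":"))
	// Output: ["" "cpu" "mem" ""]
}
```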
if *pprofAddr != "" {
go func() {
pprofHostPort := *pprofAddr
parts := strings.Split(pprofHostPort, ":")
if len(parts) == 2 && parts[0] == "" {
pprofHostPort = fmt.Sprintf("localhost:%s", parts[1])
}
pprofHostPort = "http://" + pprofHostPort + "/debug/pprof"
log.Printf("I! Starting pprof HTTP server at: %s", pprofHostPort)
if err := http.ListenAndServe(*pprofAddr, nil); err != nil {
log.Fatal("E! " + err.Error())
}
}()
}
if len(args) > 0 {
switch args[0] {
case "version":
fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit)
return
case "config":
config.PrintSampleConfig(
inputFilters,
outputFilters,
aggregatorFilters,
processorFilters,
)
return
}
}
// switch for flags which just do something and exit immediately
switch {
case *fOutputList:
fmt.Println("Available Output Plugins:")
for k, _ := range outputs.Outputs {
fmt.Printf(" %s\n", k)
}
return
case *fInputList:
fmt.Println("Available Input Plugins:")
for k, _ := range inputs.Inputs {
fmt.Printf(" %s\n", k)
}
return
case *fVersion:
fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit)
return
case *fSampleConfig:
config.PrintSampleConfig(
inputFilters,
outputFilters,
aggregatorFilters,
processorFilters,
)
return
case *fUsage != "":
err := config.PrintInputConfig(*fUsage)
err2 := config.PrintOutputConfig(*fUsage)
if err != nil && err2 != nil {
log.Fatalf("E! %s and %s", err, err2)
}
return
}
if runtime.GOOS == "windows" && !(*fRunAsConsole) {
svcConfig := &service.Config{
Name: "telegraf",
DisplayName: "Telegraf Data Collector Service",
Description: "Collects data using a series of plugins and publishes it to " +
"another series of plugins.",
Arguments: []string{"-config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
Arguments: []string{"--config", "C:\\Program Files\\Telegraf\\telegraf.conf"},
}
prg := &program{}
prg := &program{
inputFilters: inputFilters,
outputFilters: outputFilters,
aggregatorFilters: aggregatorFilters,
processorFilters: processorFilters,
}
s, err := service.New(prg, svcConfig)
if err != nil {
log.Fatal("E! " + err.Error())
}
// Handle the -service flag here to prevent any issues with tooling that
// Handle the --service flag here to prevent any issues with tooling that
// may not have an interactive session, e.g. installing from Ansible.
if *fService != "" {
if *fConfig != "" {
(*svcConfig).Arguments = []string{"-config", *fConfig}
(*svcConfig).Arguments = []string{"--config", *fConfig}
}
if *fConfigDirectory != "" {
(*svcConfig).Arguments = append((*svcConfig).Arguments, "--config-directory", *fConfigDirectory)
}
err := service.Control(s, *fService)
if err != nil {
log.Fatal("E! " + err.Error())
}
os.Exit(0)
} else {
err = s.Run()
if err != nil {
@@ -339,6 +358,12 @@ func main() {
}
} else {
stop = make(chan struct{})
reloadLoop(stop, nil)
reloadLoop(
stop,
inputFilters,
outputFilters,
aggregatorFilters,
processorFilters,
)
}
}

cmd/telegraf/usage.go

@@ -0,0 +1,45 @@
// +build !windows
package main
const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.
Usage:
telegraf [commands|flags]
The commands & flags are:
config print out full sample configuration to stdout
version print the version to stdout
--config <file> configuration file to load
--test gather metrics once, print them to stdout, and exit
--config-directory directory containing additional *.conf files
--input-filter filter the input plugins to enable, separator is :
--output-filter filter the output plugins to enable, separator is :
--usage print usage for a plugin, ie, 'telegraf --usage mysql'
--debug print metrics as they're generated to stdout
--pprof-addr pprof address to listen on, format: localhost:6060 or :6060
--quiet run in quiet mode
Examples:
# generate a telegraf config file:
telegraf config > telegraf.conf
# generate config with only cpu input & influxdb output plugins defined
telegraf --input-filter cpu --output-filter influxdb config
# run a single telegraf collection, outputting metrics to stdout
telegraf --config telegraf.conf --test
# run telegraf with all plugins defined in config file
telegraf --config telegraf.conf
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
# run telegraf with pprof
telegraf --config telegraf.conf --pprof-addr localhost:6060
`


@@ -0,0 +1,54 @@
// +build windows
package main
const usage = `Telegraf, The plugin-driven server agent for collecting and reporting metrics.
Usage:
telegraf [commands|flags]
The commands & flags are:
config print out full sample configuration to stdout
version print the version to stdout
--config <file> configuration file to load
--test gather metrics once, print them to stdout, and exit
--config-directory directory containing additional *.conf files
--input-filter filter the input plugins to enable, separator is :
--output-filter filter the output plugins to enable, separator is :
--usage print usage for a plugin, ie, 'telegraf --usage mysql'
--debug print metrics as they're generated to stdout
--pprof-addr pprof address to listen on, format: localhost:6060 or :6060
--quiet run in quiet mode
--console run as console application
--service operate on service, one of: install, uninstall, start, stop
Examples:
# generate a telegraf config file:
telegraf config > telegraf.conf
# generate config with only cpu input & influxdb output plugins defined
telegraf --input-filter cpu --output-filter influxdb config
# run a single telegraf collection, outputting metrics to stdout
telegraf --config telegraf.conf --test
# run telegraf with all plugins defined in config file
telegraf --config telegraf.conf
# run telegraf, enabling the cpu & memory input, and influxdb output plugins
telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
# run telegraf with pprof
telegraf --config telegraf.conf --pprof-addr localhost:6060
# run telegraf without service controller
telegraf --console install --config "C:\Program Files\Telegraf\telegraf.conf"
# install telegraf service
telegraf --service install --config "C:\Program Files\Telegraf\telegraf.conf"
`

docker-compose.yml

@@ -0,0 +1,93 @@
version: '3'
services:
aerospike:
image: aerospike/aerospike-server:3.9.0
ports:
- "3000:3000"
zookeeper:
image: wurstmeister/zookeeper
environment:
- JAVA_OPTS="-Xms256m -Xmx256m"
ports:
- "2181:2181"
kafka:
image: wurstmeister/kafka
environment:
- KAFKA_ADVERTISED_HOST_NAME=localhost
- KAFKA_ADVERTISED_PORT=9092
- KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
- KAFKA_CREATE_TOPICS="test:1:1"
- JAVA_OPTS="-Xms256m -Xmx256m"
ports:
- "9092:9092"
depends_on:
- zookeeper
elasticsearch:
image: elasticsearch:5
environment:
- JAVA_OPTS="-Xms256m -Xmx256m"
ports:
- "9200:9200"
- "9300:9300"
mysql:
image: mysql
environment:
- MYSQL_ALLOW_EMPTY_PASSWORD=yes
ports:
- "3306:3306"
memcached:
image: memcached
ports:
- "11211:11211"
postgres:
image: postgres:alpine
ports:
- "5432:5432"
rabbitmq:
image: rabbitmq:3-management
ports:
- "15672:15672"
- "5672:5672"
redis:
image: redis:alpine
ports:
- "6379:6379"
nsq:
image: nsqio/nsq
ports:
- "4150:4150"
command: "/nsqd"
mqtt:
image: ncarlier/mqtt
ports:
- "1883:1883"
riemann:
image: stealthly/docker-riemann
ports:
- "5555:5555"
nats:
image: nats
ports:
- "4222:4222"
openldap:
image: cobaugh/openldap-alpine
environment:
- SLAPD_CONFIG_ROOTDN="cn=manager,cn=config"
- SLAPD_CONFIG_ROOTPW="secret"
ports:
- "389:389"
- "636:636"
crate:
image: crate/crate
ports:
- "4200:4200"
- "4230:4230"
command:
- crate
- -Cnetwork.host=0.0.0.0
- -Ctransport.host=localhost
- -Clicense.enterprise=false
environment:
- CRATE_HEAP_SIZE=128m
- JAVA_OPTS='-Xms256m -Xmx256m'


@@ -39,6 +39,11 @@ metrics as they pass through Telegraf:
Both Aggregators and Processors analyze metrics as they pass through Telegraf.
Use [measurement filtering](CONFIGURATION.md#measurement-filtering)
to control which metrics are passed through a processor or aggregator. If a
metric is filtered out the metric bypasses the plugin and is passed downstream
to the next plugin.
**Processor** plugins process metrics as they pass through and immediately emit
results based on the values they process. For example, this could be printing
all metrics or adding a tag to all metrics that pass through.


@@ -24,6 +24,22 @@ Environment variables can be used anywhere in the config file, simply prepend
them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
When using the `.deb` or `.rpm` packages, you can define environment variables
in the `/etc/default/telegraf` file.
## Configuration file locations
The location of the configuration file can be set via the `--config` command
line flag.
When the `--config-directory` command line flag is used files ending with
`.conf` in the specified directory will also be included in the Telegraf
configuration.
On most systems, the default locations are `/etc/telegraf/telegraf.conf` for
the main configuration file and `/etc/telegraf/telegraf.d` for the directory of
configuration files.
# Global Tags
Global tags can be specified in the `[global_tags]` section of the config file
@@ -56,11 +72,14 @@ interval. Maximum flush_interval will be flush_interval + flush_jitter
This is primarily to avoid
large write spikes for users running a large number of telegraf instances.
ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s.
* **precision**: By default, precision will be set to the same timestamp order
as the collection interval, with the maximum being 1s. Precision will NOT
be used for service inputs, such as logparser and statsd. Valid values are
"ns", "us" (or "µs"), "ms", "s".
* **logfile**: Specify the log file name. The empty string means to log to stdout.
* **precision**:
By default or when set to "0s", precision will be set to the same
timestamp order as the collection interval, with the maximum being 1s.
Precision will NOT be used for service inputs. It is up to each individual
service input to set the timestamp at the appropriate precision.
Valid time units are "ns", "us" (or "µs"), "ms", "s".
* **logfile**: Specify the log file name. The empty string means to log to stderr.
* **debug**: Run telegraf in debug mode.
* **quiet**: Run telegraf in quiet mode (error messages only).
* **hostname**: Override default hostname, if empty use os.Hostname().
@@ -79,9 +98,13 @@ you can configure that here.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are emitted from the input plugin.
## Output Configuration
There are no generic configuration options available for all outputs.
The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are emitted from the output plugin.
## Aggregator Configuration
@@ -102,6 +125,10 @@ aggregator and will not get sent to the output plugins.
* **name_suffix**: Specifies a suffix to attach to the measurement name.
* **tags**: A map of tags to apply to a specific input's measurements.
The [measurement filtering](#measurement-filtering) parameters can be used to
limit what metrics are handled by the aggregator. Excluded metrics are passed
downstream to the next aggregator.
## Processor Configuration
The following config parameters are available for all processors:
@@ -109,36 +136,50 @@ The following config parameters are available for all processors:
* **order**: This is the order in which the processor(s) get executed. If this
is not specified then processor execution order will be random.
The [measurement filtering](#measurement-filtering) parameters can be used
to limit what metrics are handled by the processor. Excluded metrics are
passed downstream to the next processor.
#### Measurement Filtering
Filters can be configured per input, output, processor, or aggregator,
see below for examples.
* **namepass**: An array of strings that is used to filter metrics generated by the
current input. Each string in the array is tested as a glob match against
measurement names and if it matches, the field is emitted.
* **namedrop**: The inverse of pass, if a measurement name matches, it is not emitted.
* **fieldpass**: An array of strings that is used to filter metrics generated by the
current input. Each string in the array is tested as a glob match against field names
and if it matches, the field is emitted. fieldpass is not available for outputs.
* **fielddrop**: The inverse of pass, if a field name matches, it is not emitted.
fielddrop is not available for outputs.
* **tagpass**: tag names and arrays of strings that are used to filter
measurements by the current input. Each string in the array is tested as a glob
match against the tag name, and if it matches the measurement is emitted.
* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not
emitted. This is tested on measurements that have passed the tagpass test.
* **tagexclude**: tagexclude can be used to exclude a tag from measurement(s).
As opposed to tagdrop, which will drop an entire measurement based on it's
tags, tagexclude simply strips the given tag keys from the measurement. This
can be used on inputs & outputs, but it is _recommended_ to be used on inputs,
as it is more efficient to filter out tags at the ingestion point.
* **taginclude**: taginclude is the inverse of tagexclude. It will only include
the tag keys in the final measurement.
* **namepass**:
An array of glob pattern strings. Only points whose measurement name matches
a pattern in this list are emitted.
* **namedrop**:
The inverse of `namepass`. If a match is found the point is discarded. This
is tested on points after they have passed the `namepass` test.
* **fieldpass**:
An array of glob pattern strings. Only fields whose field key matches a
pattern in this list are emitted.
* **fielddrop**:
The inverse of `fieldpass`. Fields with a field key matching one of the
patterns will be discarded from the point. This is tested on points after
they have passed the `fieldpass` test.
* **tagpass**:
A table mapping tag keys to arrays of glob pattern strings. Only points
that contain a tag key in the table and a tag value matching one of its
patterns is emitted.
* **tagdrop**:
The inverse of `tagpass`. If a match is found the point is discarded. This
is tested on points after they have passed the `tagpass` test.
* **taginclude**:
An array of glob pattern strings. Only tags with a tag key matching one of
the patterns are emitted. In contrast to `tagpass`, which will pass an entire
point based on its tag, `taginclude` removes all non matching tags from the
point. This filter can be used on both inputs & outputs, but it is
_recommended_ to be used on inputs, as it is more efficient to filter out tags
at the ingestion point.
* **tagexclude**:
The inverse of `taginclude`. Tags with a tag key matching one of the patterns
will be discarded from the point.
**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of
the plugin definition, otherwise subsequent plugin config options will be
interpreted as part of the tagpass/tagdrop map.
**NOTE** Due to the way TOML is parsed, `tagpass` and `tagdrop` parameters
must be defined at the _end_ of the plugin definition, otherwise subsequent
plugin config options will be interpreted as part of the tagpass/tagdrop
tables.
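Since all of these filters are glob matches, the underlying check can be sketched with the github.com/gobwas/glob library that Telegraf lists among its dependencies (the pattern and measurement name here are illustrative only):

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

func main() {
	// A namedrop-style check: does the measurement name match any of
	// the configured glob patterns?
	patterns := []string{"aerospike*"}
	name := "aerospike_node" // hypothetical measurement name

	for _, p := range patterns {
		g, err := glob.Compile(p)
		if err != nil {
			panic(err)
		}
		if g.Match(name) {
			fmt.Printf("%q matches %q and is dropped\n", name, p)
		}
	}
}
```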
#### Input Configuration Examples
@@ -158,7 +199,6 @@ fields which begin with `time_`.
[[outputs.influxdb]]
url = "http://192.168.59.103:8086" # required.
database = "telegraf" # required.
precision = "s"
# INPUTS
[[inputs.cpu]]
@@ -297,21 +337,18 @@ to avoid measurement collisions:
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf"
precision = "s"
# Drop all measurements that start with "aerospike"
namedrop = ["aerospike*"]
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf-aerospike-data"
precision = "s"
# Only accept aerospike data:
namepass = ["aerospike*"]
[[outputs.influxdb]]
urls = [ "http://localhost:8086" ]
database = "telegraf-cpu0-data"
precision = "s"
# Only store measurements where the tag "cpu" matches the value "cpu0"
[outputs.influxdb.tagpass]
cpu = ["cpu0"]
@@ -351,4 +388,16 @@ to the system load metrics due to the `namepass` parameter.
[[outputs.file]]
files = ["stdout"]
```
#### Processor Configuration Examples:
Print only the metrics with `cpu` as the measurement name; all metrics are
passed to the output:
```toml
[[processors.printer]]
namepass = ["cpu"]
[[outputs.file]]
files = ["/tmp/metrics.out"]
```


@@ -7,6 +7,8 @@ Telegraf is able to parse the following input data formats into metrics:
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite)
1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), ie: 45 or "booyah"
1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only)
1. [Collectd](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#collectd)
1. [Dropwizard](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#dropwizard)
Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
@@ -40,7 +42,7 @@ example, in the exec plugin:
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has it's own unique set of configuration options, read
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "json"
@@ -67,7 +69,7 @@ metrics are parsed directly into Telegraf metrics.
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has it's own unique set of configuration options, read
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
@@ -117,7 +119,7 @@ For example, if you had this configuration:
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has it's own unique set of configuration options, read
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "json"
@@ -161,7 +163,7 @@ For example, if the following configuration:
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has it's own unique set of configuration options, read
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "json"
@@ -232,7 +234,7 @@ name of the plugin.
name_override = "entropy_available"
## Data format to consume.
## Each data format has it's own unique set of configuration options, read
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "value"
@@ -390,7 +392,7 @@ There are many more options available,
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has it's own unique set of configuration options, read
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "graphite"
@@ -427,14 +429,227 @@ Note: Nagios Input Data Formats is only supported in `exec` input plugin.
```toml
[[inputs.exec]]
## Commands array
commands = ["/usr/lib/nagios/plugins/check_load", "-w 5,6,7 -c 7,8,9"]
commands = ["/usr/lib/nagios/plugins/check_load -w 5,6,7 -c 7,8,9"]
## measurement name suffix (for separating different commands)
name_suffix = "_mycollector"
## Data format to consume.
## Each data format has it's own unique set of configuration options, read
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "nagios"
```
# Collectd:
The collectd format parses the collectd binary network protocol. Tags are
created for host, instance, type, and type instance. All collectd values are
added as float64 fields.
For more information about the binary network protocol see
[here](https://collectd.org/wiki/index.php/Binary_protocol).
You can control the cryptographic settings with parser options. Create an
authentication file and set `collectd_auth_file` to the path of the file, then
set the desired security level in `collectd_security_level`.
Additional information including client setup can be found
[here](https://collectd.org/wiki/index.php/Networking_introduction#Cryptographic_setup).
You can also change the path to the typesdb or add additional typesdb using
`collectd_typesdb`.
#### Collectd Configuration:
```toml
[[inputs.socket_listener]]
service_address = "udp://127.0.0.1:25826"
name_prefix = "collectd_"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "collectd"
## Authentication file for cryptographic security levels
collectd_auth_file = "/etc/collectd/auth_file"
## One of none (default), sign, or encrypt
collectd_security_level = "encrypt"
## Path to TypesDB specifications
collectd_typesdb = ["/usr/share/collectd/types.db"]
```
# Dropwizard:
The dropwizard format can parse the JSON representation of a single dropwizard metric registry. By default, tags are parsed from metric names as if they were actual influxdb line protocol keys (`measurement<,tag_set>`), which can be overridden by defining custom [measurement & tag templates](./DATA_FORMATS_INPUT.md#measurement--tag-templates). All field value types are supported: `string`, `number` and `boolean`.
A typical JSON of a dropwizard metric registry:
```json
{
"version": "3.0.0",
"counters" : {
"measurement,tag1=green" : {
"count" : 1
}
},
"meters" : {
"measurement" : {
"count" : 1,
"m15_rate" : 1.0,
"m1_rate" : 1.0,
"m5_rate" : 1.0,
"mean_rate" : 1.0,
"units" : "events/second"
}
},
"gauges" : {
"measurement" : {
"value" : 1
}
},
"histograms" : {
"measurement" : {
"count" : 1,
"max" : 1.0,
"mean" : 1.0,
"min" : 1.0,
"p50" : 1.0,
"p75" : 1.0,
"p95" : 1.0,
"p98" : 1.0,
"p99" : 1.0,
"p999" : 1.0,
"stddev" : 1.0
}
},
"timers" : {
"measurement" : {
"count" : 1,
"max" : 1.0,
"mean" : 1.0,
"min" : 1.0,
"p50" : 1.0,
"p75" : 1.0,
"p95" : 1.0,
"p98" : 1.0,
"p99" : 1.0,
"p999" : 1.0,
"stddev" : 1.0,
"m15_rate" : 1.0,
"m1_rate" : 1.0,
"m5_rate" : 1.0,
"mean_rate" : 1.0,
"duration_units" : "seconds",
"rate_units" : "calls/second"
}
}
}
```
Would get translated into 4 different measurements:
```
measurement,metric_type=counter,tag1=green count=1
measurement,metric_type=meter count=1,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0
measurement,metric_type=gauge value=1
measurement,metric_type=histogram count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0
measurement,metric_type=timer count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0,stddev=1.0,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0
```
You may also parse a dropwizard registry from any JSON document which contains a dropwizard registry in some inner field.
Eg. to parse the following JSON document:
```json
{
"time" : "2017-02-22T14:33:03.662+02:00",
"tags" : {
"tag1" : "green",
"tag2" : "yellow"
},
"metrics" : {
"counters" : {
"measurement" : {
"count" : 1
}
},
"meters" : {},
"gauges" : {},
"histograms" : {},
"timers" : {}
}
}
```
and translate it into:
```
measurement,metric_type=counter,tag1=green,tag2=yellow count=1 1487766783662000000
```
you simply need to use the following additional configuration properties:
```toml
dropwizard_metric_registry_path = "metrics"
dropwizard_time_path = "time"
dropwizard_time_format = "2006-01-02T15:04:05Z07:00"
dropwizard_tags_path = "tags"
## tag paths per tag are supported too, eg.
#[inputs.yourinput.dropwizard_tag_paths]
# tag1 = "tags.tag1"
# tag2 = "tags.tag2"
```
For more information about the dropwizard json format see
[here](http://metrics.dropwizard.io/3.1.0/manual/json/).
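As a rough illustration of how the gjson paths used by these options resolve against the document above (a sketch using the github.com/tidwall/gjson library referenced in the parser's configuration comments; the JSON is abbreviated):

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	doc := `{"time":"2017-02-22T14:33:03.662+02:00","tags":{"tag1":"green"},"metrics":{"counters":{"measurement":{"count":1}}}}`

	fmt.Println(gjson.Get(doc, "metrics").Raw)        // the embedded registry
	fmt.Println(gjson.Get(doc, "time").String())      // 2017-02-22T14:33:03.662+02:00
	fmt.Println(gjson.Get(doc, "tags.tag1").String()) // green
}
```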
#### Dropwizard Configuration:
```toml
[[inputs.exec]]
## Commands array
commands = ["curl http://localhost:8080/sys/metrics"]
timeout = "5s"
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "dropwizard"
## Used by the templating engine to join matched values when cardinality is > 1
separator = "_"
## Each template line requires a template pattern. It can have an optional
## filter before the template and separated by spaces. It can also have optional extra
## tags following the template. Multiple tags should be separated by commas and no spaces
## similar to the line protocol format. There can be only one default template.
## Templates support below format:
## 1. filter + template
## 2. filter + template + extra tag(s)
## 3. filter + template with field key
## 4. default template
## By providing an empty template array, templating is disabled and measurements are parsed as influxdb line protocol keys (measurement<,tag_set>)
templates = []
## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
## to locate the metric registry within the JSON document
# dropwizard_metric_registry_path = "metrics"
## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
## to locate the default time of the measurements within the JSON document
# dropwizard_time_path = "time"
# dropwizard_time_format = "2006-01-02T15:04:05Z07:00"
## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax)
## to locate the tags map within the JSON document
# dropwizard_tags_path = "tags"
## You may even use tag paths per tag
# [inputs.exec.dropwizard_tag_paths]
# tag1 = "tags.tag1"
# tag2 = "tags.tag2"
```


@@ -2,12 +2,12 @@
Telegraf is able to serialize metrics into the following output data formats:
1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#influx)
1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#json)
1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite)
1. [InfluxDB Line Protocol](#influx)
1. [JSON](#json)
1. [Graphite](#graphite)
Telegraf metrics, like InfluxDB
[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/),
[points](https://docs.influxdata.com/influxdb/latest/concepts/glossary/#point),
are a combination of four basic parts:
1. Measurement Name
@@ -36,7 +36,7 @@ config option, for example, in the `file` output plugin:
files = ["stdout"]
## Data format to output.
## Each data format has it's own unique set of configuration options, read
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
@@ -49,8 +49,10 @@ I'll go over below.
# Influx:
There are no additional configuration options for InfluxDB line-protocol. The
metrics are serialized directly into InfluxDB line-protocol.
The `influx` format outputs data as
[InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/latest/write_protocols/line_protocol_tutorial/).
This is the recommended format to use unless another format is required for
interoperability.
### Influx Configuration:
@@ -60,10 +62,24 @@ metrics are serialized directly into InfluxDB line-protocol.
files = ["stdout", "/tmp/metrics.out"]
## Data format to output.
## Each data format has it's own unique set of configuration options, read
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "influx"
## Maximum line length in bytes. Useful only for debugging.
# influx_max_line_bytes = 0
## When true, fields will be output in ascending lexical order. Enabling
## this option will result in decreased performance and is only recommended
## when you need predictable ordering while debugging.
# influx_sort_fields = false
## When true, Telegraf will output unsigned integers as unsigned values,
## i.e.: `42u`. You will need a version of InfluxDB supporting unsigned
## integer values. Enabling this option will result in field type errors if
## existing data has been written.
# influx_uint_support = false
```
# Graphite:
@@ -96,6 +112,9 @@ tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690
tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
```
Fields with string values will be skipped. Boolean fields will be converted
to 1 (true) or 0 (false).
### Graphite Configuration:
```toml
@@ -104,7 +123,7 @@ tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690
files = ["stdout", "/tmp/metrics.out"]
## Data format to output.
## Each data format has it's own unique set of configuration options, read
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "graphite"
@@ -143,8 +162,18 @@ The JSON data format serialized Telegraf metrics in json format. The format is:
files = ["stdout", "/tmp/metrics.out"]
## Data format to output.
## Each data format has it's own unique set of configuration options, read
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
data_format = "json"
json_timestamp_units = "1ns"
```
By default, the timestamp that is output in JSON-serialized Telegraf metrics is in seconds. The precision of this timestamp can be adjusted for any output by adding the optional `json_timestamp_units` parameter to the configuration for that output. This parameter can be used to set the timestamp units to nanoseconds (`ns`), microseconds (`us` or `µs`), milliseconds (`ms`), or seconds (`s`). Note that this parameter will be truncated to the nearest power of 10 less than the specified value, so if `json_timestamp_units` is set to `15ms`, the timestamps for the JSON-serialized metrics will be output in hundredths of a second (`10ms`).
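A small sketch of the truncation rule described above, assuming it means the largest power-of-10 duration not exceeding the configured value:

```go
package main

import (
	"fmt"
	"time"
)

// truncateToPowerOf10 returns the largest power-of-10 duration that does
// not exceed d, matching the json_timestamp_units behavior described above.
func truncateToPowerOf10(d time.Duration) time.Duration {
	p := time.Nanosecond
	for p*10 <= d {
		p *= 10
	}
	return p
}

func main() {
	fmt.Println(truncateToPowerOf10(15 * time.Millisecond)) // 10ms
	fmt.Println(truncateToPowerOf10(time.Second))           // 1s
}
```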

docs/FAQ.md

@@ -0,0 +1,46 @@
# Frequently Asked Questions
### Q: How can I monitor the Docker Engine Host from within a container?
You will need to set up several volume mounts as well as some environment
variables:
```
docker run --name telegraf
-v /:/hostfs:ro
-v /etc:/hostfs/etc:ro
-v /proc:/hostfs/proc:ro
-v /sys:/hostfs/sys:ro
-v /var/run/utmp:/var/run/utmp:ro
-e HOST_ETC=/hostfs/etc
-e HOST_PROC=/hostfs/proc
-e HOST_SYS=/hostfs/sys
-e HOST_MOUNT_PREFIX=/hostfs
telegraf
```
### Q: Why do I get a "no such host" error resolving hostnames that other
programs can resolve?
Go uses a pure Go resolver by default for [name resolution](https://golang.org/pkg/net/#hdr-Name_Resolution).
This resolver behaves differently than the C library functions but is more
efficient when used with the Go runtime.
If you encounter problems or want to use more advanced name resolution methods
that are unsupported by the pure Go resolver, you can switch to the cgo
resolver.
If running manually, set:
```
export GODEBUG=netdns=cgo
```
If running as a service add the environment variable to `/etc/default/telegraf`:
```
GODEBUG=netdns=cgo
```
### Q: When will the next version be released?
The latest release date estimate can be viewed on the
[milestones](https://github.com/influxdata/telegraf/milestones) page.


@@ -1,33 +1,107 @@
# List
- github.com/Shopify/sarama [MIT LICENSE](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
- github.com/Sirupsen/logrus [MIT LICENSE](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
- github.com/armon/go-metrics [MIT LICENSE](https://github.com/armon/go-metrics/blob/master/LICENSE)
- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
- github.com/cenkalti/backoff [MIT LICENSE](https://github.com/cenkalti/backoff/blob/master/LICENSE)
- github.com/dancannon/gorethink [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/master/LICENSE)
- github.com/eapache/go-resiliency [MIT LICENSE](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
- github.com/eapache/queue [MIT LICENSE](https://github.com/eapache/queue/blob/master/LICENSE)
- github.com/fsouza/go-dockerclient [BSD LICENSE](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
- github.com/go-sql-driver/mysql [MPL LICENSE](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
- github.com/gogo/protobuf [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/hashicorp/go-msgpack [BSD LICENSE](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL LICENSE](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/hashicorp/raft-boltdb [MPL LICENSE](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
- github.com/kardianos/service [ZLIB LICENSE](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
- github.com/kballard/go-shellquote [MIT LICENSE](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
- github.com/lib/pq [MIT LICENSE](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT LICENSE](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
- github.com/naoina/toml [MIT LICENSE](https://github.com/naoina/toml/blob/master/LICENSE)
- github.com/prometheus/client_golang [APACHE LICENSE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
- github.com/samuel/go-zookeeper [BSD LICENSE](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
- github.com/stretchr/objx [MIT LICENSE](github.com/stretchr/objx)
- github.com/stretchr/testify [MIT LICENSE](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
- github.com/wvanbergen/kafka [MIT LICENSE](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
- github.com/wvanbergen/kazoo-go [MIT LICENSE](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
- gopkg.in/dancannon/gorethink.v1 [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/mgo.v2 [BSD LICENSE](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- golang.org/x/crypto/ [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
# Licenses of dependencies
When distributed in a binary form, Telegraf may contain portions of the
following works:
- collectd.org [MIT](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- github.com/aerospike/aerospike-client-go [APACHE](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE)
- github.com/amir/raidman [PUBLIC DOMAIN](https://github.com/amir/raidman/blob/master/UNLICENSE)
- github.com/armon/go-metrics [MIT](https://github.com/armon/go-metrics/blob/master/LICENSE)
- github.com/aws/aws-sdk-go [APACHE](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
- github.com/beorn7/perks [MIT](https://github.com/beorn7/perks/blob/master/LICENSE)
- github.com/boltdb/bolt [MIT](https://github.com/boltdb/bolt/blob/master/LICENSE)
- github.com/bsm/sarama-cluster [MIT](https://github.com/bsm/sarama-cluster/blob/master/LICENSE)
- github.com/cenkalti/backoff [MIT](https://github.com/cenkalti/backoff/blob/master/LICENSE)
- github.com/chuckpreslar/rcon [MIT](https://github.com/chuckpreslar/rcon#license)
- github.com/couchbase/go-couchbase [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
- github.com/couchbase/gomemcached [MIT](https://github.com/couchbase/gomemcached/blob/master/LICENSE)
- github.com/couchbase/goutils [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE)
- github.com/dancannon/gorethink [APACHE](https://github.com/dancannon/gorethink/blob/master/LICENSE)
- github.com/davecgh/go-spew [ISC](https://github.com/davecgh/go-spew/blob/master/LICENSE)
- github.com/docker/docker [APACHE](https://github.com/docker/docker/blob/master/LICENSE)
- github.com/docker/cli [APACHE](https://github.com/docker/cli/blob/master/LICENSE)
- github.com/eapache/go-resiliency [MIT](https://github.com/eapache/go-resiliency/blob/master/LICENSE)
- github.com/eapache/go-xerial-snappy [MIT](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE)
- github.com/eapache/queue [MIT](https://github.com/eapache/queue/blob/master/LICENSE)
- github.com/eclipse/paho.mqtt.golang [ECLIPSE](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE)
- github.com/fsnotify/fsnotify [BSD](https://github.com/fsnotify/fsnotify/blob/master/LICENSE)
- github.com/fsouza/go-dockerclient [BSD](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE)
- github.com/gobwas/glob [MIT](https://github.com/gobwas/glob/blob/master/LICENSE)
- github.com/google/go-cmp [BSD](https://github.com/google/go-cmp/blob/master/LICENSE)
- github.com/gogo/protobuf [BSD](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/protobuf [BSD](https://github.com/golang/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/go-logfmt/logfmt [MIT](https://github.com/go-logfmt/logfmt/blob/master/LICENSE)
- github.com/gorilla/mux [BSD](https://github.com/gorilla/mux/blob/master/LICENSE)
- github.com/go-ini/ini [APACHE](https://github.com/go-ini/ini/blob/master/LICENSE)
- github.com/go-ole/go-ole [MPL](http://mattn.mit-license.org/2013)
- github.com/go-sql-driver/mysql [MPL](https://github.com/go-sql-driver/mysql/blob/master/LICENSE)
- github.com/hailocab/go-hostpool [MIT](https://github.com/hailocab/go-hostpool/blob/master/LICENSE)
- github.com/hashicorp/consul [MPL](https://github.com/hashicorp/consul/blob/master/LICENSE)
- github.com/hashicorp/go-msgpack [BSD](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE)
- github.com/hashicorp/raft-boltdb [MPL](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE)
- github.com/hashicorp/raft [MPL](https://github.com/hashicorp/raft/blob/master/LICENSE)
- github.com/influxdata/tail [MIT](https://github.com/influxdata/tail/blob/master/LICENSE.txt)
- github.com/influxdata/toml [MIT](https://github.com/influxdata/toml/blob/master/LICENSE)
- github.com/influxdata/wlog [MIT](https://github.com/influxdata/wlog/blob/master/LICENSE)
- github.com/jackc/pgx [MIT](https://github.com/jackc/pgx/blob/master/LICENSE)
- github.com/jmespath/go-jmespath [APACHE](https://github.com/jmespath/go-jmespath/blob/master/LICENSE)
- github.com/kardianos/osext [BSD](https://github.com/kardianos/osext/blob/master/LICENSE)
- github.com/kardianos/service [ZLIB](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib)
- github.com/kballard/go-shellquote [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE)
- github.com/lib/pq [MIT](https://github.com/lib/pq/blob/master/LICENSE.md)
- github.com/matttproud/golang_protobuf_extensions [APACHE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE)
- github.com/Microsoft/go-winio [MIT](https://github.com/Microsoft/go-winio/blob/master/LICENSE)
- github.com/miekg/dns [BSD](https://github.com/miekg/dns/blob/master/LICENSE)
- github.com/naoina/go-stringutil [MIT](https://github.com/naoina/go-stringutil/blob/master/LICENSE)
- github.com/naoina/toml [MIT](https://github.com/naoina/toml/blob/master/LICENSE)
- github.com/nats-io/gnatsd [MIT](https://github.com/nats-io/gnatsd/blob/master/LICENSE)
- github.com/nats-io/go-nats [MIT](https://github.com/nats-io/go-nats/blob/master/LICENSE)
- github.com/nats-io/nats [MIT](https://github.com/nats-io/nats/blob/master/LICENSE)
- github.com/nats-io/nuid [MIT](https://github.com/nats-io/nuid/blob/master/LICENSE)
- github.com/nsqio/go-nsq [MIT](https://github.com/nsqio/go-nsq/blob/master/LICENSE)
- github.com/opentracing-contrib/go-observer [APACHE](https://github.com/opentracing-contrib/go-observer/blob/master/LICENSE)
- github.com/opentracing/opentracing-go [MIT](https://github.com/opentracing/opentracing-go/blob/master/LICENSE)
- github.com/openzipkin/zipkin-go-opentracing [MIT](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE)
- github.com/pierrec/lz4 [BSD](https://github.com/pierrec/lz4/blob/master/LICENSE)
- github.com/pierrec/xxHash [BSD](https://github.com/pierrec/xxHash/blob/master/LICENSE)
- github.com/pkg/errors [BSD](https://github.com/pkg/errors/blob/master/LICENSE)
- github.com/pmezard/go-difflib [BSD](https://github.com/pmezard/go-difflib/blob/master/LICENSE)
- github.com/prometheus/client_golang [APACHE](https://github.com/prometheus/client_golang/blob/master/LICENSE)
- github.com/prometheus/client_model [APACHE](https://github.com/prometheus/client_model/blob/master/LICENSE)
- github.com/prometheus/common [APACHE](https://github.com/prometheus/common/blob/master/LICENSE)
- github.com/prometheus/procfs [APACHE](https://github.com/prometheus/procfs/blob/master/LICENSE)
- github.com/rcrowley/go-metrics [BSD](https://github.com/rcrowley/go-metrics/blob/master/LICENSE)
- github.com/samuel/go-zookeeper [BSD](https://github.com/samuel/go-zookeeper/blob/master/LICENSE)
- github.com/satori/go.uuid [MIT](https://github.com/satori/go.uuid/blob/master/LICENSE)
- github.com/shirou/gopsutil [BSD](https://github.com/shirou/gopsutil/blob/master/LICENSE)
- github.com/shirou/w32 [BSD](https://github.com/shirou/w32/blob/master/LICENSE)
- github.com/Shopify/sarama [MIT](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE)
- github.com/Sirupsen/logrus [MIT](https://github.com/Sirupsen/logrus/blob/master/LICENSE)
- github.com/StackExchange/wmi [MIT](https://github.com/StackExchange/wmi/blob/master/LICENSE)
- github.com/soniah/gosnmp [BSD](https://github.com/soniah/gosnmp/blob/master/LICENSE)
- github.com/streadway/amqp [BSD](https://github.com/streadway/amqp/blob/master/LICENSE)
- github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md)
- github.com/stretchr/testify [MIT](https://github.com/stretchr/testify/blob/master/LICENCE.txt)
- github.com/tidwall/gjson [MIT](https://github.com/tidwall/gjson/blob/master/LICENSE)
- github.com/tidwall/match [MIT](https://github.com/tidwall/match/blob/master/LICENSE)
- github.com/mitchellh/mapstructure [MIT](https://github.com/mitchellh/mapstructure/blob/master/LICENSE)
- github.com/multiplay/go-ts3 [BSD](https://github.com/multiplay/go-ts3/blob/master/LICENSE)
- github.com/vjeantet/grok [APACHE](https://github.com/vjeantet/grok/blob/master/LICENSE)
- github.com/wvanbergen/kafka [MIT](https://github.com/wvanbergen/kafka/blob/master/LICENSE)
- github.com/wvanbergen/kazoo-go [MIT](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE)
- github.com/yuin/gopher-lua [MIT](https://github.com/yuin/gopher-lua/blob/master/LICENSE)
- github.com/zensqlmonitor/go-mssqldb [BSD](https://github.com/zensqlmonitor/go-mssqldb/blob/master/LICENSE.txt)
- golang.org/x/crypto [BSD](https://github.com/golang/crypto/blob/master/LICENSE)
- golang.org/x/net [BSD](https://go.googlesource.com/net/+/master/LICENSE)
- golang.org/x/text [BSD](https://go.googlesource.com/text/+/master/LICENSE)
- golang.org/x/sys [BSD](https://go.googlesource.com/sys/+/master/LICENSE)
- gopkg.in/asn1-ber.v1 [MIT](https://github.com/go-asn1-ber/asn1-ber/blob/v1.2/LICENSE)
- gopkg.in/dancannon/gorethink.v1 [APACHE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE)
- gopkg.in/fatih/pool.v2 [MIT](https://github.com/fatih/pool/blob/v2.0.0/LICENSE)
- gopkg.in/ldap.v2 [MIT](https://github.com/go-ldap/ldap/blob/v2.5.0/LICENSE)
- gopkg.in/mgo.v2 [BSD](https://github.com/go-mgo/mgo/blob/v2/LICENSE)
- gopkg.in/olivere/elastic.v5 [MIT](https://github.com/olivere/elastic/blob/v5.0.38/LICENSE)
- gopkg.in/tomb.v1 [BSD](https://github.com/go-tomb/tomb/blob/v1/LICENSE)
- gopkg.in/yaml.v2 [APACHE](https://github.com/go-yaml/yaml/blob/v2/LICENSE)

docs/PROFILING.md

@@ -0,0 +1,24 @@
# Telegraf profiling
Telegraf uses the standard Go package `net/http/pprof`. This package serves runtime profiling data via its HTTP server in the format expected by the pprof visualization tool.
By default, profiling is turned off.
To enable profiling you need to specify an address with the `pprof-addr` parameter, for example:
```
telegraf --config telegraf.conf --pprof-addr localhost:6060
```
There are several paths to get different profiling information:
To look at the heap profile:
`go tool pprof http://localhost:6060/debug/pprof/heap`
or to look at a 30-second CPU profile:
`go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30`
To view all available profiles, open `http://localhost:6060/debug/pprof/` in your browser.
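Under the hood this only requires importing `net/http/pprof` for its side effects and serving HTTP; a minimal standalone sketch (not Telegraf's actual wiring):

```go
package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers the /debug/pprof/ handlers on the default mux
)

func main() {
	// Roughly equivalent to `telegraf --pprof-addr localhost:6060`.
	log.Fatal(http.ListenAndServe("localhost:6060", nil))
}
```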


@@ -5,7 +5,7 @@ the general steps to set it up.
1. Obtain the telegraf windows distribution
2. Create the directory `C:\Program Files\Telegraf` (if you install in a different
location simply specify the `-config` parameter with the desired location)
location simply specify the `--config` parameter with the desired location)
3. Place the telegraf.exe and the telegraf.conf config file into `C:\Program Files\Telegraf`
4. To install the service into the Windows Service Manager, run the following in PowerShell as an administrator (If necessary, you can wrap any spaces in the file paths in double quotes ""):
@@ -26,6 +26,15 @@ the general steps to set it up.
> net start telegraf
```
## Config Directory
You can also specify a `--config-directory` for the service to use:
1. Create a directory for config snippets: `C:\Program Files\Telegraf\telegraf.d`
2. Include the `--config-directory` option when registering the service:
```
> C:\"Program Files"\Telegraf\telegraf.exe --service install --config C:\"Program Files"\Telegraf\telegraf.conf --config-directory C:\"Program Files"\Telegraf\telegraf.d
```
## Other supported operations
Telegraf can manage its own service through the --service flag:
@@ -37,3 +46,8 @@ Telegraf can manage its own service through the --service flag:
| `telegraf.exe --service start` | Start the telegraf service |
| `telegraf.exe --service stop` | Stop the telegraf service |
## Troubleshooting common error #1067
When installing as a service on Windows, always double-check that you specify the full path to the config file; otherwise the Windows service will fail to start:
--config C:\"Program Files"\Telegraf\telegraf.conf



@@ -63,8 +63,8 @@
# The full HTTP or UDP endpoint URL for your InfluxDB instance.
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster, this means that only ONE of the urls will be written to each interval.
# urls = ["udp://localhost:8089"] # UDP endpoint example
urls = ["http://localhost:8086"] # required
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
urls = ["http://127.0.0.1:8086"] # required
# The target database for metrics (telegraf will create it if not exists)
database = "telegraf" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
@@ -105,10 +105,11 @@
"% Privileged Time",
"% User Time",
"% Processor Time",
"% DPC Time",
]
Measurement = "win_cpu"
# Set to true to include _Total instance when querying for all (*).
#IncludeTotal=false
IncludeTotal=true
[[inputs.win_perf_counters.object]]
# Disk times and queues
@@ -116,21 +117,54 @@
Instances = ["*"]
Counters = [
"% Idle Time",
"% Disk Time","% Disk Read Time",
"% Disk Time",
"% Disk Read Time",
"% Disk Write Time",
"% User Time",
"Current Disk Queue Length",
"% Free Space",
"Free Megabytes",
]
Measurement = "win_disk"
# Set to true to include _Total instance when querying for all (*).
#IncludeTotal=false
[[inputs.win_perf_counters.object]]
ObjectName = "PhysicalDisk"
Instances = ["*"]
Counters = [
"Disk Read Bytes/sec",
"Disk Write Bytes/sec",
"Current Disk Queue Length",
"Disk Reads/sec",
"Disk Writes/sec",
"% Disk Time",
"% Disk Read Time",
"% Disk Write Time",
]
Measurement = "win_diskio"
[[inputs.win_perf_counters.object]]
ObjectName = "Network Interface"
Instances = ["*"]
Counters = [
"Bytes Received/sec",
"Bytes Sent/sec",
"Packets Received/sec",
"Packets Sent/sec",
"Packets Received Discarded",
"Packets Outbound Discarded",
"Packets Received Errors",
"Packets Outbound Errors",
]
Measurement = "win_net"
[[inputs.win_perf_counters.object]]
ObjectName = "System"
Counters = [
"Context Switches/sec",
"System Calls/sec",
"Processor Queue Length",
"System Up Time",
]
Instances = ["------"]
Measurement = "win_system"
@@ -150,6 +184,10 @@
"Transition Faults/sec",
"Pool Nonpaged Bytes",
"Pool Paged Bytes",
"Standby Cache Reserve Bytes",
"Standby Cache Normal Priority Bytes",
"Standby Cache Core Bytes",
]
# Use 6 x - to remove the Instance bit from the query.
Instances = ["------"]
@@ -157,6 +195,31 @@
# Set to true to include _Total instance when querying for all (*).
#IncludeTotal=false
[[inputs.win_perf_counters.object]]
# Example query where the Instance portion must be removed to get data back,
# such as from the Paging File object.
ObjectName = "Paging File"
Counters = [
"% Usage",
]
Instances = ["_Total"]
Measurement = "win_swap"
[[inputs.win_perf_counters.object]]
ObjectName = "Network Interface"
Instances = ["*"]
Counters = [
"Bytes Sent/sec",
"Bytes Received/sec",
"Packets Sent/sec",
"Packets Received/sec",
"Packets Received Discarded",
"Packets Received Errors",
"Packets Outbound Discarded",
"Packets Outbound Errors",
]
# Windows system plugins using WMI (disabled by default, using
# win_perf_counters over WMI is recommended)


@@ -77,3 +77,40 @@ func compileFilterNoGlob(filters []string) Filter {
}
return &out
}
type IncludeExcludeFilter struct {
include Filter
exclude Filter
}
func NewIncludeExcludeFilter(
include []string,
exclude []string,
) (Filter, error) {
in, err := Compile(include)
if err != nil {
return nil, err
}
ex, err := Compile(exclude)
if err != nil {
return nil, err
}
return &IncludeExcludeFilter{in, ex}, nil
}
func (f *IncludeExcludeFilter) Match(s string) bool {
if f.include != nil {
if !f.include.Match(s) {
return false
}
}
if f.exclude != nil {
if f.exclude.Match(s) {
return false
}
}
return true
}
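A hypothetical usage sketch of the new type, assuming the package's import path and the glob semantics of `Compile` (the patterns are illustrative):

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	f, err := filter.NewIncludeExcludeFilter([]string{"cpu*"}, []string{"cpu_guest"})
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Match("cpu_user"))  // true: included and not excluded
	fmt.Println(f.Match("cpu_guest")) // false: matched by the exclude list
	fmt.Println(f.Match("mem_used"))  // false: not matched by the include list
}
```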


@@ -45,9 +45,11 @@ func (b *Buffer) Add(metrics ...telegraf.Metric) {
select {
case b.buf <- metrics[i]:
default:
b.mu.Lock()
MetricsDropped.Incr(1)
<-b.buf
b.buf <- metrics[i]
b.mu.Unlock()
}
}
}
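The fix above serializes the drop-oldest path so two writers cannot both drain the channel at once. The underlying idiom, in isolation (a simplified sketch, not the actual Buffer type):

```go
package main

import (
	"fmt"
	"sync"
)

// ring is a simplified channel-backed ring buffer: when full, it drops the
// oldest element to make room. The mutex makes the drain-and-refill step
// atomic when multiple writers race on a full buffer.
type ring struct {
	mu  sync.Mutex
	buf chan int
}

func (r *ring) add(v int) {
	select {
	case r.buf <- v:
	default:
		r.mu.Lock()
		<-r.buf // drop the oldest element
		r.buf <- v
		r.mu.Unlock()
	}
}

func main() {
	r := &ring{buf: make(chan int, 2)}
	for i := 1; i <= 3; i++ {
		r.add(i)
	}
	fmt.Println(<-r.buf, <-r.buf) // 2 3: the oldest value (1) was dropped
}
```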


@@ -6,8 +6,10 @@ import (
"fmt"
"io/ioutil"
"log"
"math"
"os"
"path/filepath"
"regexp"
"runtime"
"sort"
@@ -25,7 +27,6 @@ import (
"github.com/influxdata/telegraf/plugins/processors"
"github.com/influxdata/telegraf/plugins/serializers"
"github.com/influxdata/config"
"github.com/influxdata/toml"
"github.com/influxdata/toml/ast"
)
@@ -40,6 +41,11 @@ var (
// envVarRe is a regex to find environment variables in the config file
envVarRe = regexp.MustCompile(`\$\w+`)
envVarEscaper = strings.NewReplacer(
`"`, `\"`,
`\`, `\\`,
)
)
// Config specifies the URL/user/password for the database that telegraf
@@ -85,8 +91,8 @@ type AgentConfig struct {
// ie, if Interval=10s then always collect on :00, :10, :20, etc.
RoundInterval bool
// By default, precision will be set to the same timestamp order as the
// collection interval, with the maximum being 1s.
// By default or when set to "0s", precision will be set to the same
// timestamp order as the collection interval, with the maximum being 1s.
// ie, when interval = "10s", precision will be "1s"
// when interval = "250ms", precision will be "1ms"
// Precision will NOT be used for service inputs. It is up to each individual
@@ -126,7 +132,7 @@ type AgentConfig struct {
// TODO(cam): Remove UTC and parameter, they are no longer
// valid for the agent config. Leaving them here for now for backwards-
// compatability
// compatibility
UTC bool `toml:"utc"`
// Debug is the option for running in debug mode
@@ -230,10 +236,13 @@ var header = `# Telegraf Configuration
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## By default, precision will be set to the same timestamp order as the
## collection interval, with the maximum being 1s.
## Precision will NOT be used for service inputs, such as logparser and statsd.
## Valid values are "ns", "us" (or "µs"), "ms", "s".
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Logging configuration:
@@ -506,7 +515,17 @@ func PrintOutputConfig(name string) error {
func (c *Config) LoadDirectory(path string) error {
walkfn := func(thispath string, info os.FileInfo, _ error) error {
if info == nil {
log.Printf("W! Telegraf is not permitted to read %s", thispath)
return nil
}
if info.IsDir() {
if strings.HasPrefix(info.Name(), "..") {
// skip Kubernetes mounts, preventing the same config from being loaded twice
return filepath.SkipDir
}
return nil
}
name := info.Name()
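
The directory skip above targets Kubernetes ConfigMap volumes, whose atomic writer exposes every file twice: once inside a timestamped ".."-prefixed directory and once via symlinks through "..data". Without the skip, LoadDirectory would load each config file twice. An illustrative (hypothetical) layout:

/etc/telegraf/telegraf.d/
├── ..2018_05_07_10_00_00.123456789/   <- real files; skipped, name starts with ".."
│   └── inputs.conf
├── ..data -> ..2018_05_07_10_00_00.123456789
└── inputs.conf -> ..data/inputs.conf  <- the copy the walk should load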
@@ -566,7 +585,7 @@ func (c *Config) LoadConfig(path string) error {
if !ok {
return fmt.Errorf("%s: invalid configuration", path)
}
if err = config.UnmarshalTable(subTable, c.Tags); err != nil {
if err = toml.UnmarshalTable(subTable, c.Tags); err != nil {
log.Printf("E! Could not parse [global_tags] config\n")
return fmt.Errorf("Error parsing %s, %s", path, err)
}
@@ -579,7 +598,7 @@ func (c *Config) LoadConfig(path string) error {
if !ok {
return fmt.Errorf("%s: invalid configuration", path)
}
if err = config.UnmarshalTable(subTable, c.Agent); err != nil {
if err = toml.UnmarshalTable(subTable, c.Agent); err != nil {
log.Printf("E! Could not parse [agent] config\n")
return fmt.Errorf("Error parsing %s, %s", path, err)
}
@@ -676,12 +695,17 @@ func (c *Config) LoadConfig(path string) error {
}
// trimBOM trims the Byte-Order-Marks from the beginning of the file.
// this is for Windows compatability only.
// this is for Windows compatibility only.
// see https://github.com/influxdata/telegraf/issues/1378
func trimBOM(f []byte) []byte {
return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))
}
// escapeEnv escapes a value for inserting into a TOML string.
func escapeEnv(value string) string {
return envVarEscaper.Replace(value)
}
// parseFile loads a TOML configuration from a provided path and
// returns the AST produced from the TOML parser. When loading the file, it
// will find environment variables and replace them.
@@ -695,8 +719,9 @@ func parseFile(fpath string) (*ast.Table, error) {
env_vars := envVarRe.FindAll(contents, -1)
for _, env_var := range env_vars {
env_val := os.Getenv(strings.TrimPrefix(string(env_var), "$"))
if env_val != "" {
env_val, ok := os.LookupEnv(strings.TrimPrefix(string(env_var), "$"))
if ok {
env_val = escapeEnv(env_val)
contents = bytes.Replace(contents, env_var, []byte(env_val), 1)
}
}
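
Two behavior changes ride together here: os.LookupEnv substitutes variables that are set but empty (os.Getenv cannot distinguish unset from empty), and substituted values are escaped so quotes and backslashes cannot break the surrounding TOML string. A small sketch mirroring the replacer added above:

package main

import (
	"fmt"
	"strings"
)

// Values substituted into the TOML source must have backslashes and
// quotes escaped, or a value such as `pass"word` would terminate the
// surrounding string literal early.
var envVarEscaper = strings.NewReplacer(`"`, `\"`, `\`, `\\`)

func main() {
	raw := `C:\secret\pass"word`
	fmt.Printf("password = \"%s\"\n", envVarEscaper.Replace(raw))
	// Output: password = "C:\\secret\\pass\"word"
}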
@@ -716,7 +741,7 @@ func (c *Config) addAggregator(name string, table *ast.Table) error {
return err
}
if err := config.UnmarshalTable(table, aggregator); err != nil {
if err := toml.UnmarshalTable(table, aggregator); err != nil {
return err
}
@@ -736,7 +761,7 @@ func (c *Config) addProcessor(name string, table *ast.Table) error {
return err
}
if err := config.UnmarshalTable(table, processor); err != nil {
if err := toml.UnmarshalTable(table, processor); err != nil {
return err
}
@@ -776,7 +801,7 @@ func (c *Config) addOutput(name string, table *ast.Table) error {
return err
}
if err := config.UnmarshalTable(table, output); err != nil {
if err := toml.UnmarshalTable(table, output); err != nil {
return err
}
@@ -817,7 +842,7 @@ func (c *Config) addInput(name string, table *ast.Table) error {
return err
}
if err := config.UnmarshalTable(table, input); err != nil {
if err := toml.UnmarshalTable(table, input); err != nil {
return err
}
@@ -909,7 +934,7 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err
conf.Tags = make(map[string]string)
if node, ok := tbl.Fields["tags"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
if err := config.UnmarshalTable(subtbl, conf.Tags); err != nil {
if err := toml.UnmarshalTable(subtbl, conf.Tags); err != nil {
log.Printf("Could not parse tags for input %s\n", name)
}
}
@@ -1146,7 +1171,7 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
cp.Tags = make(map[string]string)
if node, ok := tbl.Fields["tags"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
if err := config.UnmarshalTable(subtbl, cp.Tags); err != nil {
if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
log.Printf("E! Could not parse tags for input %s\n", name)
}
}
@@ -1226,6 +1251,75 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
}
}
if node, ok := tbl.Fields["collectd_auth_file"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CollectdAuthFile = str.Value
}
}
}
if node, ok := tbl.Fields["collectd_security_level"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.CollectdSecurityLevel = str.Value
}
}
}
if node, ok := tbl.Fields["collectd_typesdb"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
c.CollectdTypesDB = append(c.CollectdTypesDB, str.Value)
}
}
}
}
}
if node, ok := tbl.Fields["dropwizard_metric_registry_path"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DropwizardMetricRegistryPath = str.Value
}
}
}
if node, ok := tbl.Fields["dropwizard_time_path"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DropwizardTimePath = str.Value
}
}
}
if node, ok := tbl.Fields["dropwizard_time_format"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DropwizardTimeFormat = str.Value
}
}
}
if node, ok := tbl.Fields["dropwizard_tags_path"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DropwizardTagsPath = str.Value
}
}
}
c.DropwizardTagPathsMap = make(map[string]string)
if node, ok := tbl.Fields["dropwizard_tag_paths"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
c.DropwizardTagPathsMap[name] = str.Value
}
}
}
}
}
c.MetricName = name
delete(tbl.Fields, "data_format")
@@ -1233,6 +1327,14 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
delete(tbl.Fields, "templates")
delete(tbl.Fields, "tag_keys")
delete(tbl.Fields, "data_type")
delete(tbl.Fields, "collectd_auth_file")
delete(tbl.Fields, "collectd_security_level")
delete(tbl.Fields, "collectd_typesdb")
delete(tbl.Fields, "dropwizard_metric_registry_path")
delete(tbl.Fields, "dropwizard_time_path")
delete(tbl.Fields, "dropwizard_time_format")
delete(tbl.Fields, "dropwizard_tags_path")
delete(tbl.Fields, "dropwizard_tag_paths")
return parsers.NewParser(c)
}
@@ -1241,7 +1343,7 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
// a serializers.Serializer object, and creates it, which can then be added onto
// an Output object.
func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) {
c := &serializers.Config{}
c := &serializers.Config{TimestampUnits: time.Duration(1 * time.Second)}
if node, ok := tbl.Fields["data_format"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
@@ -1271,9 +1373,65 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error
}
}
if node, ok := tbl.Fields["influx_max_line_bytes"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if integer, ok := kv.Value.(*ast.Integer); ok {
v, err := integer.Int()
if err != nil {
return nil, err
}
c.InfluxMaxLineBytes = int(v)
}
}
}
if node, ok := tbl.Fields["influx_sort_fields"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
c.InfluxSortFields, err = b.Boolean()
if err != nil {
return nil, err
}
}
}
}
if node, ok := tbl.Fields["influx_uint_support"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if b, ok := kv.Value.(*ast.Boolean); ok {
var err error
c.InfluxUintSupport, err = b.Boolean()
if err != nil {
return nil, err
}
}
}
}
if node, ok := tbl.Fields["json_timestamp_units"]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
timestampVal, err := time.ParseDuration(str.Value)
if err != nil {
return nil, fmt.Errorf("Unable to parse json_timestamp_units as a duration, %s", err)
}
// now that we have a duration, truncate it to the nearest
// power of ten (just in case)
nearest_exponent := int64(math.Log10(float64(timestampVal.Nanoseconds())))
new_nanoseconds := int64(math.Pow(10.0, float64(nearest_exponent)))
c.TimestampUnits = time.Duration(new_nanoseconds)
}
}
}
delete(tbl.Fields, "influx_max_line_bytes")
delete(tbl.Fields, "influx_sort_fields")
delete(tbl.Fields, "influx_uint_support")
delete(tbl.Fields, "data_format")
delete(tbl.Fields, "prefix")
delete(tbl.Fields, "template")
delete(tbl.Fields, "json_timestamp_units")
return serializers.NewSerializer(c)
}
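
The json_timestamp_units handling rounds whatever duration the user supplies down to the nearest power of ten nanoseconds, so only units like 1ns, 1us, 1ms, and 1s survive. A sketch of that truncation:

package main

import (
	"fmt"
	"math"
	"time"
)

// truncateToPowerOfTen mirrors the rounding above: the duration's
// nanosecond count is floored via log10, then raised back to a power of ten.
func truncateToPowerOfTen(d time.Duration) time.Duration {
	exp := int64(math.Log10(float64(d.Nanoseconds())))
	return time.Duration(int64(math.Pow(10, float64(exp))))
}

func main() {
	fmt.Println(truncateToPowerOfTen(15 * time.Millisecond)) // 10ms
	fmt.Println(truncateToPowerOfTen(1 * time.Second))       // 1s
}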

View File

@@ -0,0 +1,4 @@
# This invalid config file should be skipped during testing
# as it is an ..data folder
[[outputs.influxdb

View File

@@ -60,7 +60,7 @@
# Kafka topic for producer messages
topic = "telegraf"
# Telegraf tag to use as a routing key
# ie, if this tag exists, it's value will be used as the routing key
# ie, if this tag exists, its value will be used as the routing key
routing_tag = "host"
@@ -143,19 +143,31 @@
[[inputs.diskio]]
# no configuration
# read metrics from a Kafka topic
# read metrics from a Kafka 0.9+ topic
[[inputs.kafka_consumer]]
# topic(s) to consume
## kafka brokers
brokers = ["localhost:9092"]
## topic(s) to consume
topics = ["telegraf"]
## the name of the consumer group
consumer_group = "telegraf_metrics_consumers"
## Offset (must be either "oldest" or "newest")
offset = "oldest"
# read metrics from a Kafka legacy topic
[[inputs.kafka_consumer_legacy]]
## topic(s) to consume
topics = ["telegraf"]
# an array of Zookeeper connection strings
zookeeper_peers = ["localhost:2181"]
# the name of the consumer group
## the name of the consumer group
consumer_group = "telegraf_metrics_consumers"
# Maximum number of points to buffer between collection intervals
point_buffer = 100000
# Offset (must be either "oldest" or "newest")
## Offset (must be either "oldest" or "newest")
offset = "oldest"
# Read metrics from a LeoFS Server via SNMP
[[inputs.leofs]]
# An array of URI to gather stats about LeoFS.

View File

@@ -1,37 +0,0 @@
package errchan
import (
"fmt"
"strings"
)
type ErrChan struct {
C chan error
}
// New returns an error channel of max length 'n'
// errors can be sent to the ErrChan.C channel, and will be returned when
// ErrChan.Error() is called.
func New(n int) *ErrChan {
return &ErrChan{
C: make(chan error, n),
}
}
// Error closes the ErrChan.C channel and returns an error if there are any
// non-nil errors, otherwise returns nil.
func (e *ErrChan) Error() error {
close(e.C)
var out string
for err := range e.C {
if err != nil {
out += "[" + err.Error() + "], "
}
}
if out != "" {
return fmt.Errorf("Errors encountered: " + strings.TrimRight(out, ", "))
}
return nil
}

View File

@@ -45,7 +45,7 @@ func (g *GlobPath) Match() map[string]os.FileInfo {
if !g.hasMeta {
out := make(map[string]os.FileInfo)
info, err := os.Stat(g.path)
if !os.IsNotExist(err) {
if err == nil {
out[g.path] = info
}
return out
@@ -55,7 +55,7 @@ func (g *GlobPath) Match() map[string]os.FileInfo {
files, _ := filepath.Glob(g.path)
for _, file := range files {
info, err := os.Stat(file)
if !os.IsNotExist(err) {
if err == nil {
out[file] = info
}
}
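
The change from !os.IsNotExist(err) to err == nil matters for unreadable paths: a permission error is not a not-exist error, so the old check inserted the path with a nil os.FileInfo. The new TestMatch_ErrPermission below pins this down. A sketch of the distinction (assumes a non-root run on a system where /root is unreadable):

package main

import (
	"fmt"
	"os"
)

func main() {
	// Stat fails with a permission error here, not a not-exist error.
	info, err := os.Stat("/root/foo")
	fmt.Println(!os.IsNotExist(err)) // true: the old check kept the path, with info == nil
	fmt.Println(err == nil, info)    // false <nil>: the new check skips it
}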

View File

@@ -1,6 +1,7 @@
package globpath
import (
"os"
"runtime"
"strings"
"testing"
@@ -28,7 +29,7 @@ func TestCompileAndMatch(t *testing.T) {
require.NoError(t, err)
matches := g1.Match()
assert.Len(t, matches, 3)
assert.Len(t, matches, 6)
matches = g2.Match()
assert.Len(t, matches, 2)
matches = g3.Match()
@@ -56,7 +57,34 @@ func TestFindRootDir(t *testing.T) {
}
}
func TestFindNestedTextFile(t *testing.T) {
dir := getTestdataDir()
// test super asterisk
g1, err := Compile(dir + "/**.txt")
require.NoError(t, err)
matches := g1.Match()
assert.Len(t, matches, 1)
}
func getTestdataDir() string {
_, filename, _, _ := runtime.Caller(1)
return strings.Replace(filename, "globpath_test.go", "testdata", 1)
}
func TestMatch_ErrPermission(t *testing.T) {
tests := []struct {
input string
expected map[string]os.FileInfo
}{
{"/root/foo", map[string]os.FileInfo{}},
{"/root/f*", map[string]os.FileInfo{}},
}
for _, test := range tests {
glob, err := Compile(test.input)
require.NoError(t, err)
actual := glob.Match()
require.Equal(t, test.expected, actual)
}
}

View File

@@ -112,9 +112,10 @@ func RandomString(n int) string {
return string(bytes)
}
// GetTLSConfig gets a tls.Config object from the given certs, key, and CA files.
// you must give the full path to the files.
// If all files are blank and InsecureSkipVerify=false, returns a nil pointer.
// GetTLSConfig gets a tls.Config object from the given certs, key, and CA files
// for use with a client.
// The full path to each file must be provided.
// Returns a nil pointer if all files are blank and InsecureSkipVerify=false.
func GetTLSConfig(
SSLCert, SSLKey, SSLCA string,
InsecureSkipVerify bool,
@@ -155,6 +156,50 @@ func GetTLSConfig(
return t, nil
}
// GetServerTLSConfig gets a tls.Config object from the given certs, key, and one or more CA files
// for use with a server.
// The full path to each file must be provided.
// Returns a nil pointer if all files are blank.
func GetServerTLSConfig(
TLSCert, TLSKey string,
TLSAllowedCACerts []string,
) (*tls.Config, error) {
if TLSCert == "" && TLSKey == "" && len(TLSAllowedCACerts) == 0 {
return nil, nil
}
t := &tls.Config{}
if len(TLSAllowedCACerts) != 0 {
caCertPool := x509.NewCertPool()
for _, cert := range TLSAllowedCACerts {
c, err := ioutil.ReadFile(cert)
if err != nil {
return nil, errors.New(fmt.Sprintf("Could not load TLS CA: %s",
err))
}
caCertPool.AppendCertsFromPEM(c)
}
t.ClientCAs = caCertPool
t.ClientAuth = tls.RequireAndVerifyClientCert
}
if TLSCert != "" && TLSKey != "" {
cert, err := tls.LoadX509KeyPair(TLSCert, TLSKey)
if err != nil {
return nil, errors.New(fmt.Sprintf(
"Could not load TLS client key/certificate from %s:%s: %s",
TLSKey, TLSCert, err))
}
t.Certificates = []tls.Certificate{cert}
}
t.BuildNameToCertificate()
return t, nil
}
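
A minimal sketch of wiring GetServerTLSConfig into a TLS server, assuming the helper lives in telegraf's internal package as this file suggests (note that Go restricts internal imports to the telegraf module itself; the certificate paths below are placeholders):

package main

import (
	"log"
	"net/http"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	// Supplying allowed CA certs makes the server require and verify
	// client certificates (tls.RequireAndVerifyClientCert above).
	tlsConfig, err := internal.GetServerTLSConfig(
		"/etc/telegraf/server.pem",
		"/etc/telegraf/server-key.pem",
		[]string{"/etc/telegraf/ca.pem"},
	)
	if err != nil {
		log.Fatal(err)
	}
	srv := &http.Server{Addr: ":8443", TLSConfig: tlsConfig}
	log.Fatal(srv.ListenAndServeTLS("", "")) // cert and key already live in TLSConfig
}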
// SnakeCase converts the given string to snake case following the Golang format:
// acronyms are converted to lower-case and preceded by an underscore.
func SnakeCase(in string) string {

View File

@@ -40,9 +40,13 @@ func TestSnakeCase(t *testing.T) {
var (
sleepbin, _ = exec.LookPath("sleep")
echobin, _ = exec.LookPath("echo")
shell, _ = exec.LookPath("sh")
)
func TestRunTimeout(t *testing.T) {
if testing.Short() {
t.Skip("Skipping test due to random failures.")
}
if sleepbin == "" {
t.Skip("'sleep' binary not available on OS, skipping.")
}
@@ -57,6 +61,8 @@ func TestRunTimeout(t *testing.T) {
}
func TestCombinedOutputTimeout(t *testing.T) {
// TODO: Fix this test
t.Skip("Test failing too often, skip for now and revisit later.")
if sleepbin == "" {
t.Skip("'sleep' binary not available on OS, skipping.")
}
@@ -84,13 +90,13 @@ func TestCombinedOutput(t *testing.T) {
// test that CombinedOutputTimeout and exec.Cmd.CombinedOutput return
// the same output from a failed command.
func TestCombinedOutputError(t *testing.T) {
if sleepbin == "" {
t.Skip("'sleep' binary not available on OS, skipping.")
if shell == "" {
t.Skip("'sh' binary not available on OS, skipping.")
}
cmd := exec.Command(sleepbin, "foo")
cmd := exec.Command(shell, "-c", "false")
expected, err := cmd.CombinedOutput()
cmd2 := exec.Command(sleepbin, "foo")
cmd2 := exec.Command(shell, "-c", "false")
actual, err := CombinedOutputTimeout(cmd2, time.Second)
assert.Error(t, err)
@@ -98,16 +104,18 @@ func TestCombinedOutputError(t *testing.T) {
}
func TestRunError(t *testing.T) {
if sleepbin == "" {
t.Skip("'sleep' binary not available on OS, skipping.")
if shell == "" {
t.Skip("'sh' binary not available on OS, skipping.")
}
cmd := exec.Command(sleepbin, "foo")
cmd := exec.Command(shell, "-c", "false")
err := RunTimeout(cmd, time.Second)
assert.Error(t, err)
}
func TestRandomSleep(t *testing.T) {
// TODO: Fix this test
t.Skip("Test failing too often, skip for now and revisit later.")
// test that zero max returns immediately
s := time.Now()
RandomSleep(time.Duration(0), make(chan struct{}))

View File

@@ -1,54 +0,0 @@
package limiter
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestRateLimiter(t *testing.T) {
r := NewRateLimiter(5, time.Second)
ticker := time.NewTicker(time.Millisecond * 75)
// test that we can only get 5 receives from the rate limiter
counter := 0
outer:
for {
select {
case <-r.C:
counter++
case <-ticker.C:
break outer
}
}
assert.Equal(t, 5, counter)
r.Stop()
// verify that the Stop function closes the channel.
_, ok := <-r.C
assert.False(t, ok)
}
func TestRateLimiterMultipleIterations(t *testing.T) {
r := NewRateLimiter(5, time.Millisecond*50)
ticker := time.NewTicker(time.Millisecond * 250)
// test that we can get 15 receives from the rate limiter
counter := 0
outer:
for {
select {
case <-ticker.C:
break outer
case <-r.C:
counter++
}
}
assert.True(t, counter > 10)
r.Stop()
// verify that the Stop function closes the channel.
_, ok := <-r.C
assert.False(t, ok)
}

View File

@@ -132,6 +132,7 @@ func (f *Filter) Apply(
return true
}
// IsActive checking if filter is active
func (f *Filter) IsActive() bool {
return f.isActive
}
@@ -139,43 +140,66 @@ func (f *Filter) IsActive() bool {
// shouldNamePass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters
func (f *Filter) shouldNamePass(key string) bool {
if f.namePass != nil {
pass := func(f *Filter) bool {
if f.namePass.Match(key) {
return true
}
return false
}
if f.nameDrop != nil {
drop := func(f *Filter) bool {
if f.nameDrop.Match(key) {
return false
}
return true
}
if f.namePass != nil && f.nameDrop != nil {
return pass(f) && drop(f)
} else if f.namePass != nil {
return pass(f)
} else if f.nameDrop != nil {
return drop(f)
}
return true
}
// shouldFieldPass returns true if the metric should pass, false if should drop
// based on the drop/pass filter parameters
func (f *Filter) shouldFieldPass(key string) bool {
if f.fieldPass != nil {
pass := func(f *Filter) bool {
if f.fieldPass.Match(key) {
return true
}
return false
}
if f.fieldDrop != nil {
drop := func(f *Filter) bool {
if f.fieldDrop.Match(key) {
return false
}
return true
}
if f.fieldPass != nil && f.fieldDrop != nil {
return pass(f) && drop(f)
} else if f.fieldPass != nil {
return pass(f)
} else if f.fieldDrop != nil {
return drop(f)
}
return true
}
// shouldTagsPass returns true if the metric should pass, false if should drop
// based on the tagdrop/tagpass filter parameters
func (f *Filter) shouldTagsPass(tags map[string]string) bool {
if f.TagPass != nil {
pass := func(f *Filter) bool {
for _, pat := range f.TagPass {
if pat.filter == nil {
continue
@@ -189,7 +213,7 @@ func (f *Filter) shouldTagsPass(tags map[string]string) bool {
return false
}
if f.TagDrop != nil {
drop := func(f *Filter) bool {
for _, pat := range f.TagDrop {
if pat.filter == nil {
continue
@@ -203,6 +227,18 @@ func (f *Filter) shouldTagsPass(tags map[string]string) bool {
return true
}
// Additional logic for the case where both parameters are set.
// see: https://github.com/influxdata/telegraf/issues/2860
if f.TagPass != nil && f.TagDrop != nil {
// return true only when the tag passes and won't be dropped (true, true).
// when the same tag would be both passed and dropped, it is dropped (true, false).
return pass(f) && drop(f)
} else if f.TagPass != nil {
return pass(f)
} else if f.TagDrop != nil {
return drop(f)
}
return true
}

View File

@@ -357,3 +357,88 @@ func TestFilter_FilterTagsMatches(t *testing.T) {
"mytag": "foobar",
}, pretags)
}
// TestFilter_FilterNamePassAndDrop checks the case when
// both parameters are defined
// see: https://github.com/influxdata/telegraf/issues/2860
func TestFilter_FilterNamePassAndDrop(t *testing.T) {
inputData := []string{"name1", "name2", "name3", "name4"}
expectedResult := []bool{false, true, false, false}
f := Filter{
NamePass: []string{"name1", "name2"},
NameDrop: []string{"name1", "name3"},
}
require.NoError(t, f.Compile())
for i, name := range inputData {
assert.Equal(t, f.shouldNamePass(name), expectedResult[i])
}
}
// TestFilter_FilterFieldPassAndDrop checks the case when
// both parameters are defined
// see: https://github.com/influxdata/telegraf/issues/2860
func TestFilter_FilterFieldPassAndDrop(t *testing.T) {
inputData := []string{"field1", "field2", "field3", "field4"}
expectedResult := []bool{false, true, false, false}
f := Filter{
FieldPass: []string{"field1", "field2"},
FieldDrop: []string{"field1", "field3"},
}
require.NoError(t, f.Compile())
for i, field := range inputData {
assert.Equal(t, f.shouldFieldPass(field), expectedResult[i])
}
}
// TestFilter_FilterTagsPassAndDrop checks the case when
// both parameters are defined
// see: https://github.com/influxdata/telegraf/issues/2860
func TestFilter_FilterTagsPassAndDrop(t *testing.T) {
inputData := []map[string]string{
{"tag1": "1", "tag2": "3"},
{"tag1": "1", "tag2": "2"},
{"tag1": "2", "tag2": "1"},
{"tag1": "4", "tag2": "1"},
}
expectedResult := []bool{false, true, false, false}
filterPass := []TagFilter{
TagFilter{
Name: "tag1",
Filter: []string{"1", "4"},
},
}
filterDrop := []TagFilter{
TagFilter{
Name: "tag1",
Filter: []string{"4"},
},
TagFilter{
Name: "tag2",
Filter: []string{"3"},
},
}
f := Filter{
TagDrop: filterDrop,
TagPass: filterPass,
}
require.NoError(t, f.Compile())
for i, tag := range inputData {
assert.Equal(t, f.shouldTagsPass(tag), expectedResult[i])
}
}

View File

@@ -2,7 +2,6 @@ package models
import (
"log"
"math"
"time"
"github.com/influxdata/telegraf"
@@ -77,62 +76,6 @@ func makemetric(
}
}
for k, v := range fields {
// Validate uint64 and float64 fields
// convert all int & uint types to int64
switch val := v.(type) {
case nil:
// delete nil fields
delete(fields, k)
case uint:
fields[k] = int64(val)
continue
case uint8:
fields[k] = int64(val)
continue
case uint16:
fields[k] = int64(val)
continue
case uint32:
fields[k] = int64(val)
continue
case int:
fields[k] = int64(val)
continue
case int8:
fields[k] = int64(val)
continue
case int16:
fields[k] = int64(val)
continue
case int32:
fields[k] = int64(val)
continue
case uint64:
// InfluxDB does not support writing uint64
if val < uint64(9223372036854775808) {
fields[k] = int64(val)
} else {
fields[k] = int64(9223372036854775807)
}
continue
case float32:
fields[k] = float64(val)
continue
case float64:
// NaNs are invalid values in influxdb, skip measurement
if math.IsNaN(val) || math.IsInf(val, 0) {
log.Printf("D! Measurement [%s] field [%s] has a NaN or Inf "+
"field, skipping",
measurement, k)
delete(fields, k)
continue
}
default:
fields[k] = v
}
}
m, err := metric.New(measurement, tags, fields, t, mType)
if err != nil {
log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())

View File

@@ -1,7 +1,6 @@
package models
import (
"fmt"
"sync"
"sync/atomic"
"testing"
@@ -167,69 +166,6 @@ func TestAddDropOriginal(t *testing.T) {
assert.False(t, ra.Add(m2))
}
// make an untyped, counter, & gauge metric
func TestMakeMetricA(t *testing.T) {
now := time.Now()
ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{
Name: "TestRunningAggregator",
})
assert.Equal(t, "aggregators.TestRunningAggregator", ra.Name())
m := ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
)
assert.Equal(
t,
m.Type(),
telegraf.Untyped,
)
m = ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Counter,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
)
assert.Equal(
t,
m.Type(),
telegraf.Counter,
)
m = ra.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Gauge,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
)
assert.Equal(
t,
m.Type(),
telegraf.Gauge,
)
}
type TestAggregator struct {
sum int64
}

View File

@@ -5,6 +5,7 @@ import (
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/serializers/influx"
"github.com/influxdata/telegraf/selfstat"
)
@@ -75,7 +76,12 @@ func (r *RunningInput) MakeMetric(
)
if r.trace && m != nil {
fmt.Print("> " + m.String())
s := influx.NewSerializer()
s.SetFieldSortOrder(influx.SortFields)
octets, err := s.Serialize(m)
if err == nil {
fmt.Print("> " + string(octets))
}
}
r.MetricsGathered.Incr(1)

View File

@@ -1,14 +1,14 @@
package models
import (
"fmt"
"math"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestMakeMetricNoFields(t *testing.T) {
@@ -44,77 +44,17 @@ func TestMakeMetricNilFields(t *testing.T) {
telegraf.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
)
}
// make an untyped, counter, & gauge metric
func TestMakeMetric(t *testing.T) {
now := time.Now()
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
assert.Equal(t, "inputs.TestRunningInput", ri.Name())
m := ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
expected, err := metric.New("RITest",
map[string]string{},
telegraf.Untyped,
map[string]interface{}{
"value": int(101),
},
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
)
assert.Equal(
t,
m.Type(),
telegraf.Untyped,
)
require.NoError(t, err)
m = ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Counter,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
)
assert.Equal(
t,
m.Type(),
telegraf.Counter,
)
m = ri.MakeMetric(
"RITest",
map[string]interface{}{"value": int(101)},
map[string]string{},
telegraf.Gauge,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
)
assert.Equal(
t,
m.Type(),
telegraf.Gauge,
)
require.Equal(t, expected, m)
}
func TestMakeMetricWithPluginTags(t *testing.T) {
@@ -136,11 +76,18 @@ func TestMakeMetricWithPluginTags(t *testing.T) {
telegraf.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
m.String(),
expected, err := metric.New("RITest",
map[string]string{
"foo": "bar",
},
map[string]interface{}{
"value": 101,
},
now,
)
require.NoError(t, err)
require.Equal(t, expected, m)
}
func TestMakeMetricFilteredOut(t *testing.T) {
@@ -186,87 +133,17 @@ func TestMakeMetricWithDaemonTags(t *testing.T) {
telegraf.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest,foo=bar value=101i %d\n", now.UnixNano()),
m.String(),
)
}
// make an untyped, counter, & gauge metric
func TestMakeMetricInfFields(t *testing.T) {
inf := math.Inf(1)
ninf := math.Inf(-1)
now := time.Now()
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
m := ri.MakeMetric(
"RITest",
map[string]interface{}{
"value": int(101),
"inf": inf,
"ninf": ninf,
expected, err := metric.New("RITest",
map[string]string{
"foo": "bar",
},
map[string]interface{}{
"value": 101,
},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest value=101i %d\n", now.UnixNano()),
m.String(),
)
}
func TestMakeMetricAllFieldTypes(t *testing.T) {
now := time.Now()
ri := NewRunningInput(&testInput{}, &InputConfig{
Name: "TestRunningInput",
})
ri.SetTrace(true)
assert.Equal(t, true, ri.Trace())
m := ri.MakeMetric(
"RITest",
map[string]interface{}{
"a": int(10),
"b": int8(10),
"c": int16(10),
"d": int32(10),
"e": uint(10),
"f": uint8(10),
"g": uint16(10),
"h": uint32(10),
"i": uint64(10),
"j": float32(10),
"k": uint64(9223372036854775810),
"l": "foobar",
"m": true,
},
map[string]string{},
telegraf.Untyped,
now,
)
assert.Contains(t, m.String(), "a=10i")
assert.Contains(t, m.String(), "b=10i")
assert.Contains(t, m.String(), "c=10i")
assert.Contains(t, m.String(), "d=10i")
assert.Contains(t, m.String(), "e=10i")
assert.Contains(t, m.String(), "f=10i")
assert.Contains(t, m.String(), "g=10i")
assert.Contains(t, m.String(), "h=10i")
assert.Contains(t, m.String(), "i=10i")
assert.Contains(t, m.String(), "j=10")
assert.NotContains(t, m.String(), "j=10i")
assert.Contains(t, m.String(), "k=9223372036854775807i")
assert.Contains(t, m.String(), "l=\"foobar\"")
assert.Contains(t, m.String(), "m=true")
require.NoError(t, err)
require.Equal(t, expected, m)
}
func TestMakeMetricNameOverride(t *testing.T) {
@@ -283,11 +160,15 @@ func TestMakeMetricNameOverride(t *testing.T) {
telegraf.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("foobar value=101i %d\n", now.UnixNano()),
m.String(),
expected, err := metric.New("foobar",
nil,
map[string]interface{}{
"value": 101,
},
now,
)
require.NoError(t, err)
require.Equal(t, expected, m)
}
func TestMakeMetricNamePrefix(t *testing.T) {
@@ -304,11 +185,15 @@ func TestMakeMetricNamePrefix(t *testing.T) {
telegraf.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("foobar_RITest value=101i %d\n", now.UnixNano()),
m.String(),
expected, err := metric.New("foobar_RITest",
nil,
map[string]interface{}{
"value": 101,
},
now,
)
require.NoError(t, err)
require.Equal(t, expected, m)
}
func TestMakeMetricNameSuffix(t *testing.T) {
@@ -325,11 +210,15 @@ func TestMakeMetricNameSuffix(t *testing.T) {
telegraf.Untyped,
now,
)
assert.Equal(
t,
fmt.Sprintf("RITest_foobar value=101i %d\n", now.UnixNano()),
m.String(),
expected, err := metric.New("RITest_foobar",
nil,
map[string]interface{}{
"value": 101,
},
now,
)
require.NoError(t, err)
require.Equal(t, expected, m)
}
type testInput struct{}

View File

@@ -2,6 +2,7 @@ package models
import (
"log"
"sync"
"time"
"github.com/influxdata/telegraf"
@@ -34,6 +35,9 @@ type RunningOutput struct {
metrics *buffer.Buffer
failMetrics *buffer.Buffer
// Guards against concurrent calls to the Output as described in #3009
sync.Mutex
}
func NewRunningOutput(
@@ -83,13 +87,16 @@ func NewRunningOutput(
map[string]string{"output": name},
),
}
ro.BufferLimit.Incr(int64(ro.MetricBufferLimit))
ro.BufferLimit.Set(int64(ro.MetricBufferLimit))
return ro
}
// AddMetric adds a metric to the output. This function can also write cached
// points if FlushBufferWhenFull is true.
func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
if m == nil {
return
}
// Filter any tagexclude/taginclude parameters before adding metric
if ro.Config.Filter.IsActive() {
// In order to filter out tags, we need to create a new metric, since
@@ -106,6 +113,11 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
m, _ = metric.New(name, tags, fields, t)
}
if output, ok := ro.Output.(telegraf.AggregatingOutput); ok {
output.Add(m)
return
}
ro.metrics.Add(m)
if ro.metrics.Len() == ro.MetricBatchSize {
batch := ro.metrics.Batch(ro.MetricBatchSize)
@@ -118,10 +130,16 @@ func (ro *RunningOutput) AddMetric(m telegraf.Metric) {
// Write writes all cached points to this output.
func (ro *RunningOutput) Write() error {
if output, ok := ro.Output.(telegraf.AggregatingOutput); ok {
metrics := output.Push()
ro.metrics.Add(metrics...)
output.Reset()
}
nFails, nMetrics := ro.failMetrics.Len(), ro.metrics.Len()
ro.BufferSize.Set(int64(nFails + nMetrics))
log.Printf("D! Output [%s] buffer fullness: %d / %d metrics. ",
ro.Name, nFails+nMetrics, ro.MetricBufferLimit)
ro.BufferSize.Incr(int64(nFails + nMetrics))
var err error
if !ro.failMetrics.IsEmpty() {
// how many batches of failed writes we need to write.
@@ -166,6 +184,8 @@ func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
if nMetrics == 0 {
return nil
}
ro.Lock()
defer ro.Unlock()
start := time.Now()
err := ro.Output.Write(metrics)
elapsed := time.Since(start)
@@ -173,7 +193,6 @@ func (ro *RunningOutput) write(metrics []telegraf.Metric) error {
log.Printf("D! Output [%s] wrote batch of %d metrics in %s\n",
ro.Name, nMetrics, elapsed)
ro.MetricsWritten.Incr(int64(nMetrics))
ro.BufferSize.Incr(-int64(nMetrics))
ro.WriteTime.Incr(elapsed.Nanoseconds())
}
return err

View File

@@ -75,6 +75,23 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) {
}
}
func TestAddingNilMetric(t *testing.T) {
conf := &OutputConfig{
Filter: Filter{},
}
m := &mockOutput{}
ro := NewRunningOutput("test", m, conf, 1000, 10000)
ro.AddMetric(nil)
ro.AddMetric(nil)
ro.AddMetric(nil)
err := ro.Write()
assert.NoError(t, err)
assert.Len(t, m.Metrics(), 0)
}
// Test that NameDrop filters get properly applied.
func TestRunningOutput_DropFilter(t *testing.T) {
conf := &OutputConfig{

View File

@@ -1,11 +1,15 @@
package models
import (
"sync"
"github.com/influxdata/telegraf"
)
type RunningProcessor struct {
Name string
Name string
sync.Mutex
Processor telegraf.Processor
Config *ProcessorConfig
}
@@ -24,6 +28,9 @@ type ProcessorConfig struct {
}
func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric {
rp.Lock()
defer rp.Unlock()
ret := []telegraf.Metric{}
for _, metric := range in {

View File

@@ -0,0 +1,86 @@
package templating
import (
"sort"
"strings"
)
const (
// DefaultSeparator is the default separation character to use when separating template parts.
DefaultSeparator = "."
)
// Engine uses a Matcher to retrieve the appropriate template and applies the template
// to the input string
type Engine struct {
joiner string
matcher *matcher
}
// Apply extracts the template fields from the given line and returns the measurement
// name, tags and field name
func (e *Engine) Apply(line string) (string, map[string]string, string, error) {
return e.matcher.match(line).Apply(line, e.joiner)
}
// NewEngine creates a new templating engine
func NewEngine(joiner string, defaultTemplate *Template, templates []string) (*Engine, error) {
engine := Engine{
joiner: joiner,
matcher: newMatcher(defaultTemplate),
}
templateSpecs := parseTemplateSpecs(templates)
for _, templateSpec := range templateSpecs {
if err := engine.matcher.addSpec(templateSpec); err != nil {
return nil, err
}
}
return &engine, nil
}
func parseTemplateSpecs(templates []string) templateSpecs {
tmplts := templateSpecs{}
for _, pattern := range templates {
tmplt := templateSpec{
separator: DefaultSeparator,
}
// Format is [separator] [filter] <template> [tag1=value1,tag2=value2]
parts := strings.Fields(pattern)
partsLength := len(parts)
if partsLength < 1 {
// ignore
continue
}
if partsLength == 1 {
tmplt.template = pattern
} else if partsLength == 4 {
tmplt.separator = parts[0]
tmplt.filter = parts[1]
tmplt.template = parts[2]
tmplt.tagstring = parts[3]
} else {
hasTagstring := strings.Contains(parts[partsLength-1], "=")
if hasTagstring {
tmplt.tagstring = parts[partsLength-1]
tmplt.template = parts[partsLength-2]
if partsLength == 3 {
tmplt.filter = parts[0]
}
} else {
tmplt.template = parts[partsLength-1]
if partsLength == 2 {
tmplt.filter = parts[0]
} else { // length == 3
tmplt.separator = parts[0]
tmplt.filter = parts[1]
}
}
}
tmplts = append(tmplts, tmplt)
}
sort.Sort(tmplts)
return tmplts
}
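
A sketch of the engine in use, assuming the internal/templating package path shown in this changeset (importable only from within the telegraf module). The spec string follows the "[separator] [filter] <template> [tag1=value1,...]" format, and an empty template part (the "" before "host") skips that position of the input:

package main

import (
	"fmt"

	"github.com/influxdata/telegraf/internal/templating"
)

func main() {
	defaultTmpl, err := templating.NewDefaultTemplateWithPattern("measurement")
	if err != nil {
		panic(err)
	}
	engine, err := templating.NewEngine(".", defaultTmpl, []string{
		"servers.* .host.measurement.field region=us-east",
	})
	if err != nil {
		panic(err)
	}
	name, tags, field, _ := engine.Apply("servers.web01.cpu.user")
	fmt.Println(name, tags, field) // cpu map[host:web01 region:us-east] user
}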

View File

@@ -0,0 +1,58 @@
package templating
import (
"strings"
)
// matcher determines which template should be applied to a given metric
// based on a filter tree.
type matcher struct {
root *node
defaultTemplate *Template
}
// newMatcher creates a new matcher.
func newMatcher(defaultTemplate *Template) *matcher {
return &matcher{
root: &node{},
defaultTemplate: defaultTemplate,
}
}
func (m *matcher) addSpec(tmplt templateSpec) error {
// Parse out the default tags specific to this template
tags := map[string]string{}
if tmplt.tagstring != "" {
for _, kv := range strings.Split(tmplt.tagstring, ",") {
parts := strings.Split(kv, "=")
tags[parts[0]] = parts[1]
}
}
tmpl, err := NewTemplate(tmplt.separator, tmplt.template, tags)
if err != nil {
return err
}
m.add(tmplt.filter, tmpl)
return nil
}
// add inserts the template in the filter tree based the given filter
func (m *matcher) add(filter string, template *Template) {
if filter == "" {
m.defaultTemplate = template
m.root.separator = template.separator
return
}
m.root.insert(filter, template)
}
// match returns the template that matches the given measurement line.
// If no template matches, the default template is returned.
func (m *matcher) match(line string) *Template {
tmpl := m.root.search(line)
if tmpl != nil {
return tmpl
}
return m.defaultTemplate
}

internal/templating/node.go Normal file
View File

@@ -0,0 +1,122 @@
package templating
import (
"sort"
"strings"
)
// node is an item in a sorted k-ary tree of filter parts. Each child is sorted by its part value.
// The special value "*" is always sorted last.
type node struct {
separator string
value string
children nodes
template *Template
}
// insert inserts the given string template into the tree. The filter string is separated
// on the template separator and each part is used as the path in the tree.
func (n *node) insert(filter string, template *Template) {
n.separator = template.separator
n.recursiveInsert(strings.Split(filter, n.separator), template)
}
// recursiveInsert does the actual recursive insertion
func (n *node) recursiveInsert(values []string, template *Template) {
// At the end of the filter path, set the template
if len(values) == 0 {
n.template = template
return
}
// See if the current element already exists in the tree. If so, insert the
// rest into that sub-tree
for _, v := range n.children {
if v.value == values[0] {
v.recursiveInsert(values[1:], template)
return
}
}
// New element, add it to the tree and sort the children
newNode := &node{value: values[0]}
n.children = append(n.children, newNode)
sort.Sort(&n.children)
// Now insert the rest of the tree into the new element
newNode.recursiveInsert(values[1:], template)
}
// search searches for a template matching the input string
func (n *node) search(line string) *Template {
separator := n.separator
return n.recursiveSearch(strings.Split(line, separator))
}
// recursiveSearch performs the actual recursive search
func (n *node) recursiveSearch(lineParts []string) *Template {
// Nothing to search
if len(lineParts) == 0 || len(n.children) == 0 {
return n.template
}
// If the last element is a wildcard, don't include it in this search: it is sorted
// to the end, but lexicographically it would not always be, and sort.Search assumes
// the slice is sorted.
length := len(n.children)
if n.children[length-1].value == "*" {
length--
}
// Find the index of child with an exact match
i := sort.Search(length, func(i int) bool {
return n.children[i].value >= lineParts[0]
})
// Found an exact match, so search that child sub-tree
if i < len(n.children) && n.children[i].value == lineParts[0] {
return n.children[i].recursiveSearch(lineParts[1:])
}
// Not an exact match, see if we have a wildcard child to search
if n.children[len(n.children)-1].value == "*" {
return n.children[len(n.children)-1].recursiveSearch(lineParts[1:])
}
return n.template
}
// nodes is simply an array of nodes implementing the sorting interface.
type nodes []*node
// Less returns a boolean indicating whether the filter at position j
// is less than the filter at position k. Filters are ordered by string
// comparison of each component part. A wildcard value "*" is never
// less than a non-wildcard value.
//
// For example, the filters:
// "*.*"
// "servers.*"
// "servers.localhost"
// "*.localhost"
//
// Would be sorted as:
// "servers.localhost"
// "servers.*"
// "*.localhost"
// "*.*"
func (n *nodes) Less(j, k int) bool {
if (*n)[j].value == "*" && (*n)[k].value != "*" {
return false
}
if (*n)[j].value != "*" && (*n)[k].value == "*" {
return true
}
return (*n)[j].value < (*n)[k].value
}
// Swap swaps two elements of the array
func (n *nodes) Swap(i, j int) { (*n)[i], (*n)[j] = (*n)[j], (*n)[i] }
// Len returns the length of the array
func (n *nodes) Len() int { return len(*n) }

View File

@@ -0,0 +1,148 @@
package templating
import (
"fmt"
"strings"
)
// Template represents a pattern and tags to map a metric string to an influxdb Point
type Template struct {
separator string
parts []string
defaultTags map[string]string
greedyField bool
greedyMeasurement bool
}
// Apply extracts the template fields from the given line and returns the measurement
// name, tags and field name
func (t *Template) Apply(line string, joiner string) (string, map[string]string, string, error) {
fields := strings.Split(line, t.separator)
var (
measurement []string
tags = make(map[string][]string)
field []string
)
// Set any default tags
for k, v := range t.defaultTags {
tags[k] = append(tags[k], v)
}
// See if an invalid combination has been specified in the template:
for _, tag := range t.parts {
if tag == "measurement*" {
t.greedyMeasurement = true
} else if tag == "field*" {
t.greedyField = true
}
}
if t.greedyField && t.greedyMeasurement {
return "", nil, "",
fmt.Errorf("either 'field*' or 'measurement*' can be used in each "+
"template (but not both together): %q",
strings.Join(t.parts, joiner))
}
for i, tag := range t.parts {
if i >= len(fields) {
continue
}
if tag == "" {
continue
}
switch tag {
case "measurement":
measurement = append(measurement, fields[i])
case "field":
field = append(field, fields[i])
case "field*":
field = append(field, fields[i:]...)
break
case "measurement*":
measurement = append(measurement, fields[i:]...)
break
default:
tags[tag] = append(tags[tag], fields[i])
}
}
// Convert to map of strings.
outtags := make(map[string]string)
for k, values := range tags {
outtags[k] = strings.Join(values, joiner)
}
return strings.Join(measurement, joiner), outtags, strings.Join(field, joiner), nil
}
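
Greedy templates consume the remainder of the line: "field*" folds every trailing part into the field name, joined with the caller's joiner, and NewTemplate rejects a template that combines it with "measurement*". A sketch under the same internal-package assumption as above:

package main

import (
	"fmt"

	"github.com/influxdata/telegraf/internal/templating"
)

func main() {
	tmpl, err := templating.NewTemplate(".", "measurement.field*", nil)
	if err != nil {
		panic(err)
	}
	name, _, field, err := tmpl.Apply("cpu.user.percent.idle", "_")
	if err != nil {
		panic(err)
	}
	fmt.Println(name, field) // cpu user_percent_idle
}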
func NewDefaultTemplateWithPattern(pattern string) (*Template, error) {
return NewTemplate(DefaultSeparator, pattern, nil)
}
// NewTemplate returns a new template ensuring it has a measurement
// specified.
func NewTemplate(separator string, pattern string, defaultTags map[string]string) (*Template, error) {
parts := strings.Split(pattern, separator)
hasMeasurement := false
template := &Template{
separator: separator,
parts: parts,
defaultTags: defaultTags,
}
for _, part := range parts {
if strings.HasPrefix(part, "measurement") {
hasMeasurement = true
}
if part == "measurement*" {
template.greedyMeasurement = true
} else if part == "field*" {
template.greedyField = true
}
}
if !hasMeasurement {
return nil, fmt.Errorf("no measurement specified for template. %q", pattern)
}
return template, nil
}
// templateSpec is a template string split in its constituent parts
type templateSpec struct {
separator string
filter string
template string
tagstring string
}
// templateSpecs is simply an array of template specs implementing the sorting interface
type templateSpecs []templateSpec
// Less reports whether the element with
// index j should sort before the element with index k.
func (e templateSpecs) Less(j, k int) bool {
if len(e[j].filter) == 0 && len(e[k].filter) == 0 {
jlength := len(strings.Split(e[j].template, e[j].separator))
klength := len(strings.Split(e[k].template, e[k].separator))
return jlength < klength
}
if len(e[j].filter) == 0 {
return true
}
if len(e[k].filter) == 0 {
return false
}
jlength := len(strings.Split(e[j].template, e[j].separator))
klength := len(strings.Split(e[k].template, e[k].separator))
return jlength < klength
}
// Swap swaps the elements with indexes i and j.
func (e templateSpecs) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
// Len is the number of elements in the collection.
func (e templateSpecs) Len() int { return len(e) }

View File

@@ -4,11 +4,14 @@ import (
"io"
"log"
"os"
"regexp"
"time"
"github.com/influxdata/wlog"
)
var prefixRegex = regexp.MustCompile("^[DIWE]!")
// newTelegrafWriter returns a logging-wrapped writer.
func newTelegrafWriter(w io.Writer) io.Writer {
return &telegrafLog{
@@ -21,7 +24,13 @@ type telegrafLog struct {
}
func (t *telegrafLog) Write(b []byte) (n int, err error) {
return t.writer.Write(append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...))
var line []byte
if !prefixRegex.Match(b) {
line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" I! "), b...)
} else {
line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...)
}
return t.writer.Write(line)
}
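
With this change every log line is guaranteed a level prefix: messages already tagged D!/I!/W!/E! pass through, and anything else is stamped as info, which keeps wlog's level filtering working for plugins that call log.Printf without a prefix. A standalone sketch of the rewrite rule:

package main

import (
	"fmt"
	"regexp"
	"time"
)

var prefixRegex = regexp.MustCompile("^[DIWE]!")

// format mirrors the writer above: lines without a level prefix are
// stamped "I!" so level filtering still applies to plain log output.
func format(b []byte) string {
	ts := time.Now().UTC().Format(time.RFC3339)
	if !prefixRegex.Match(b) {
		return ts + " I! " + string(b)
	}
	return ts + " " + string(b)
}

func main() {
	fmt.Println(format([]byte("TEST")))    // ...Z I! TEST
	fmt.Println(format([]byte("E! boom"))) // ...Z E! boom
}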
// SetupLogging configures the logging output.

View File

@@ -51,6 +51,19 @@ func TestErrorWriteLogToFile(t *testing.T) {
assert.Equal(t, f[19:], []byte("Z E! TEST\n"))
}
func TestAddDefaultLogLevel(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "")
assert.NoError(t, err)
defer func() { os.Remove(tmpfile.Name()) }()
SetupLogging(true, false, tmpfile.Name())
log.Printf("TEST")
f, err := ioutil.ReadFile(tmpfile.Name())
assert.NoError(t, err)
assert.Equal(t, f[19:], []byte("Z I! TEST\n"))
}
func BenchmarkTelegrafLogWrite(b *testing.B) {
var msg = []byte("test")
var buf bytes.Buffer

View File

@@ -2,9 +2,6 @@ package telegraf
import (
"time"
// TODO remove
"github.com/influxdata/influxdb/client/v2"
)
// ValueType is an enumeration of metric types that represent a simple value.
@@ -16,47 +13,54 @@ const (
Counter
Gauge
Untyped
Summary
Histogram
)
type Tag struct {
Key string
Value string
}
type Field struct {
Key string
Value interface{}
}
type Metric interface {
Serialize() []byte
String() string // convenience function for string(Serialize())
Copy() Metric
// Split will attempt to return multiple metrics with the same timestamp
// whose string representations are no longer than maxSize.
// Metrics with a single field may exceed the requested size.
Split(maxSize int) []Metric
// Getting data structure functions
Name() string
Tags() map[string]string
TagList() []*Tag
Fields() map[string]interface{}
FieldList() []*Field
Time() time.Time
Type() ValueType
// Name functions
SetName(name string)
AddPrefix(prefix string)
AddSuffix(suffix string)
// Tag functions
GetTag(key string) (string, bool)
HasTag(key string) bool
AddTag(key, value string)
RemoveTag(key string)
// Field functions
GetField(key string) (interface{}, bool)
HasField(key string) bool
AddField(key string, value interface{})
RemoveField(key string) error
RemoveField(key string)
// Name functions
SetName(name string)
SetPrefix(prefix string)
SetSuffix(suffix string)
// Getting data structure functions
Name() string
Tags() map[string]string
Fields() map[string]interface{}
Time() time.Time
UnixNano() int64
Type() ValueType
Len() int // returns the length of the serialized metric, including newline
// HashID returns a unique identifier for the series.
HashID() uint64
// aggregator things:
// Copy returns a deep copy of the Metric.
Copy() Metric
// Mark Metric as an aggregate
SetAggregate(bool)
IsAggregate() bool
// Point returns a influxdb client.Point object
// TODO remove this function
Point() *client.Point
}

metric/builder.go Normal file
View File

@@ -0,0 +1,53 @@
package metric
import (
"time"
"github.com/influxdata/telegraf"
)
type TimeFunc func() time.Time
type Builder struct {
TimeFunc
TimePrecision time.Duration
*metric
}
func NewBuilder() *Builder {
b := &Builder{
TimeFunc: time.Now,
TimePrecision: 1 * time.Nanosecond,
}
b.Reset()
return b
}
func (b *Builder) SetName(name string) {
b.name = name
}
func (b *Builder) AddTag(key string, value string) {
b.metric.AddTag(key, value)
}
func (b *Builder) AddField(key string, value interface{}) {
b.metric.AddField(key, value)
}
func (b *Builder) SetTime(tm time.Time) {
b.tm = tm
}
func (b *Builder) Reset() {
b.metric = &metric{}
}
func (b *Builder) Metric() (telegraf.Metric, error) {
if b.tm.IsZero() {
b.tm = b.TimeFunc().Truncate(b.TimePrecision)
}
return b.metric, nil
}
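
The Builder accumulates name, tags, fields, and time, and stamps the current time (truncated to TimePrecision) when none was set. A usage sketch, assuming it is exported from the metric package as above:

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	b := metric.NewBuilder()
	b.SetName("disk")
	b.AddTag("path", "/")
	b.AddField("used_percent", 42.0)
	b.SetTime(time.Unix(1525651200, 0)) // omit to get time.Now() truncated to TimePrecision
	m, err := b.Metric()
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Name(), m.Fields()) // disk map[used_percent:42]
	b.Reset() // the builder can now assemble the next metric
}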

View File

@@ -1,49 +0,0 @@
package metric
import (
"strings"
)
var (
// escaper is for escaping:
// - tag keys
// - tag values
// - field keys
// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
escaper = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)
unEscaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`)
// nameEscaper is for escaping measurement names only.
// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
nameEscaper = strings.NewReplacer(`,`, `\,`, ` `, `\ `)
nameUnEscaper = strings.NewReplacer(`\,`, `,`, `\ `, ` `)
// stringFieldEscaper is for escaping string field values only.
// see https://docs.influxdata.com/influxdb/v1.0/write_protocols/line_protocol_tutorial/#special-characters-and-keywords
stringFieldEscaper = strings.NewReplacer(`"`, `\"`)
stringFieldUnEscaper = strings.NewReplacer(`\"`, `"`)
)
func escape(s string, t string) string {
switch t {
case "fieldkey", "tagkey", "tagval":
return escaper.Replace(s)
case "name":
return nameEscaper.Replace(s)
case "fieldval":
return stringFieldEscaper.Replace(s)
}
return s
}
func unescape(s string, t string) string {
switch t {
case "fieldkey", "tagkey", "tagval":
return unEscaper.Replace(s)
case "name":
return nameUnEscaper.Replace(s)
case "fieldval":
return stringFieldUnEscaper.Replace(s)
}
return s
}

View File

@@ -1,38 +0,0 @@
package metric
import (
"reflect"
"strconv"
"unsafe"
)
// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt.
func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
s := unsafeBytesToString(b)
return strconv.ParseInt(s, base, bitSize)
}
// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.
func parseFloatBytes(b []byte, bitSize int) (float64, error) {
s := unsafeBytesToString(b)
return strconv.ParseFloat(s, bitSize)
}
// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool.
func parseBoolBytes(b []byte) (bool, error) {
return strconv.ParseBool(unsafeBytesToString(b))
}
// unsafeBytesToString converts a []byte to a string without a heap allocation.
//
// It is unsafe, and is intended to prepare input to short-lived functions
// that require strings.
func unsafeBytesToString(in []byte) string {
src := *(*reflect.SliceHeader)(unsafe.Pointer(&in))
dst := reflect.StringHeader{
Data: src.Data,
Len: src.Len,
}
s := *(*string)(unsafe.Pointer(&dst))
return s
}

View File

@@ -1,103 +0,0 @@
package metric
import (
"strconv"
"testing"
"testing/quick"
)
func TestParseIntBytesEquivalenceFuzz(t *testing.T) {
f := func(b []byte, base int, bitSize int) bool {
exp, expErr := strconv.ParseInt(string(b), base, bitSize)
got, gotErr := parseIntBytes(b, base, bitSize)
return exp == got && checkErrs(expErr, gotErr)
}
cfg := &quick.Config{
MaxCount: 10000,
}
if err := quick.Check(f, cfg); err != nil {
t.Fatal(err)
}
}
func TestParseIntBytesValid64bitBase10EquivalenceFuzz(t *testing.T) {
buf := []byte{}
f := func(n int64) bool {
buf = strconv.AppendInt(buf[:0], n, 10)
exp, expErr := strconv.ParseInt(string(buf), 10, 64)
got, gotErr := parseIntBytes(buf, 10, 64)
return exp == got && checkErrs(expErr, gotErr)
}
cfg := &quick.Config{
MaxCount: 10000,
}
if err := quick.Check(f, cfg); err != nil {
t.Fatal(err)
}
}
func TestParseFloatBytesEquivalenceFuzz(t *testing.T) {
f := func(b []byte, bitSize int) bool {
exp, expErr := strconv.ParseFloat(string(b), bitSize)
got, gotErr := parseFloatBytes(b, bitSize)
return exp == got && checkErrs(expErr, gotErr)
}
cfg := &quick.Config{
MaxCount: 10000,
}
if err := quick.Check(f, cfg); err != nil {
t.Fatal(err)
}
}
func TestParseFloatBytesValid64bitEquivalenceFuzz(t *testing.T) {
buf := []byte{}
f := func(n float64) bool {
buf = strconv.AppendFloat(buf[:0], n, 'f', -1, 64)
exp, expErr := strconv.ParseFloat(string(buf), 64)
got, gotErr := parseFloatBytes(buf, 64)
return exp == got && checkErrs(expErr, gotErr)
}
cfg := &quick.Config{
MaxCount: 10000,
}
if err := quick.Check(f, cfg); err != nil {
t.Fatal(err)
}
}
func TestParseBoolBytesEquivalence(t *testing.T) {
var buf []byte
for _, s := range []string{"1", "t", "T", "TRUE", "true", "True", "0", "f", "F", "FALSE", "false", "False", "fail", "TrUe", "FAlSE", "numbers", ""} {
buf = append(buf[:0], s...)
exp, expErr := strconv.ParseBool(s)
got, gotErr := parseBoolBytes(buf)
if got != exp || !checkErrs(expErr, gotErr) {
t.Errorf("Failed to parse boolean value %q correctly: wanted (%t, %v), got (%t, %v)", s, exp, expErr, got, gotErr)
}
}
}
func checkErrs(a, b error) bool {
if (a == nil) != (b == nil) {
return false
}
return a == nil || a.Error() == b.Error()
}

View File

@@ -1,546 +1,282 @@
package metric
import (
"bytes"
"fmt"
"hash/fnv"
"sort"
"strconv"
"time"
"github.com/influxdata/telegraf"
// TODO remove
"github.com/influxdata/influxdb/client/v2"
)
const MaxInt = int(^uint(0) >> 1)
type metric struct {
name string
tags []*telegraf.Tag
fields []*telegraf.Field
tm time.Time
tp telegraf.ValueType
aggregate bool
}
func New(
name string,
tags map[string]string,
fields map[string]interface{},
t time.Time,
mType ...telegraf.ValueType,
tm time.Time,
tp ...telegraf.ValueType,
) (telegraf.Metric, error) {
if len(fields) == 0 {
return nil, fmt.Errorf("Metric cannot be made without any fields")
}
if len(name) == 0 {
return nil, fmt.Errorf("Metric cannot be made with an empty name")
}
var thisType telegraf.ValueType
if len(mType) > 0 {
thisType = mType[0]
var vtype telegraf.ValueType
if len(tp) > 0 {
vtype = tp[0]
} else {
thisType = telegraf.Untyped
vtype = telegraf.Untyped
}
m := &metric{
name: []byte(escape(name, "name")),
t: []byte(fmt.Sprint(t.UnixNano())),
nsec: t.UnixNano(),
mType: thisType,
name: name,
tags: nil,
fields: nil,
tm: tm,
tp: vtype,
}
// pre-allocate exact size of the tags slice
taglen := 0
for k, v := range tags {
// TODO check that length of tag key & value are > 0
taglen += 2 + len(escape(k, "tagkey")) + len(escape(v, "tagval"))
}
m.tags = make([]byte, taglen)
i := 0
for k, v := range tags {
m.tags[i] = ','
i++
i += copy(m.tags[i:], escape(k, "tagkey"))
m.tags[i] = '='
i++
i += copy(m.tags[i:], escape(v, "tagval"))
}
// pre-allocate capacity of the fields slice
fieldlen := 0
for k, _ := range fields {
// 10 bytes is completely arbitrary, but will at least prevent some
// amount of allocations. There's a small possibility this will create
// slightly more allocations for a metric that has many short fields.
fieldlen += len(k) + 10
}
m.fields = make([]byte, 0, fieldlen)
i = 0
for k, v := range fields {
if i != 0 {
m.fields = append(m.fields, ',')
if len(tags) > 0 {
m.tags = make([]*telegraf.Tag, 0, len(tags))
for k, v := range tags {
m.tags = append(m.tags,
&telegraf.Tag{Key: k, Value: v})
}
m.fields = appendField(m.fields, k, v)
i++
sort.Slice(m.tags, func(i, j int) bool { return m.tags[i].Key < m.tags[j].Key })
}
m.fields = make([]*telegraf.Field, 0, len(fields))
for k, v := range fields {
v := convertField(v)
if v == nil {
continue
}
m.AddField(k, v)
}
return m, nil
}
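
metric.New now builds slice-backed tag and field lists instead of preserialized line-protocol bytes: tags are sorted once at construction, unconvertible field values are dropped by the convertField guard, and serialization moves out to the serializers. A usage sketch:

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	m, err := metric.New("cpu",
		map[string]string{"host": "web01", "cpu": "cpu0"},
		map[string]interface{}{
			"usage_idle": 99.5,
			"dropme":     nil, // assumed dropped by the convertField guard above
		},
		time.Unix(0, 0),
	)
	if err != nil {
		panic(err)
	}
	// TagList is sorted by key at construction, so iteration is deterministic.
	for _, tag := range m.TagList() {
		fmt.Println(tag.Key, tag.Value) // cpu cpu0, then host web01
	}
}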
// indexUnescapedByte finds the index of the first byte equal to b in buf that
// is not escaped. Returns -1 if not found.
func indexUnescapedByte(buf []byte, b byte) int {
var keyi int
for {
i := bytes.IndexByte(buf[keyi:], b)
if i == -1 {
return -1
} else if i == 0 {
break
}
keyi += i
if countBackslashes(buf, keyi-1)%2 == 0 {
break
} else {
keyi++
}
}
return keyi
}
// countBackslashes counts the number of preceding backslashes starting at
// the 'start' index.
func countBackslashes(buf []byte, index int) int {
var count int
for {
if index < 0 {
return count
}
if buf[index] == '\\' {
count++
index--
} else {
break
}
}
return count
}
type metric struct {
name []byte
tags []byte
fields []byte
t []byte
mType telegraf.ValueType
aggregate bool
// cached values for reuse in "get" functions
hashID uint64
nsec int64
}
func (m *metric) Point() *client.Point {
c, _ := client.NewPoint(m.Name(), m.Tags(), m.Fields(), m.Time())
return c
}
func (m *metric) String() string {
return string(m.name) + string(m.tags) + " " + string(m.fields) + " " + string(m.t) + "\n"
return fmt.Sprintf("%s %v %v %d", m.name, m.Tags(), m.Fields(), m.tm.UnixNano())
}
func (m *metric) Name() string {
return m.name
}
func (m *metric) Tags() map[string]string {
tags := make(map[string]string, len(m.tags))
for _, tag := range m.tags {
tags[tag.Key] = tag.Value
}
return tags
}
func (m *metric) TagList() []*telegraf.Tag {
return m.tags
}
func (m *metric) Fields() map[string]interface{} {
fields := make(map[string]interface{}, len(m.fields))
for _, field := range m.fields {
fields[field.Key] = field.Value
}
return fields
}
func (m *metric) FieldList() []*telegraf.Field {
return m.fields
}
func (m *metric) Time() time.Time {
return m.tm
}
func (m *metric) Type() telegraf.ValueType {
return m.tp
}
func (m *metric) SetName(name string) {
m.name = name
}
func (m *metric) AddPrefix(prefix string) {
m.name = prefix + m.name
}
func (m *metric) AddSuffix(suffix string) {
m.name = m.name + suffix
}
func (m *metric) AddTag(key, value string) {
for i, tag := range m.tags {
if key > tag.Key {
continue
}
if key == tag.Key {
tag.Value = value
return
}
m.tags = append(m.tags, nil)
copy(m.tags[i+1:], m.tags[i:])
m.tags[i] = &telegraf.Tag{Key: key, Value: value}
return
}
m.tags = append(m.tags, &telegraf.Tag{Key: key, Value: value})
}
func (m *metric) HasTag(key string) bool {
for _, tag := range m.tags {
if tag.Key == key {
return true
}
}
return false
}
func (m *metric) GetTag(key string) (string, bool) {
for _, tag := range m.tags {
if tag.Key == key {
return tag.Value, true
}
}
return "", false
}
func (m *metric) RemoveTag(key string) {
for i, tag := range m.tags {
if tag.Key == key {
copy(m.tags[i:], m.tags[i+1:])
m.tags[len(m.tags)-1] = nil
m.tags = m.tags[:len(m.tags)-1]
return
}
}
}
func (m *metric) AddField(key string, value interface{}) {
for i, field := range m.fields {
if key == field.Key {
m.fields[i] = &telegraf.Field{Key: key, Value: convertField(value)}
return
}
}
m.fields = append(m.fields, &telegraf.Field{Key: key, Value: convertField(value)})
}
func (m *metric) HasField(key string) bool {
for _, field := range m.fields {
if field.Key == key {
return true
}
}
return false
}
func (m *metric) GetField(key string) (interface{}, bool) {
for _, field := range m.fields {
if field.Key == key {
return field.Value, true
}
}
return nil, false
}
func (m *metric) RemoveField(key string) {
for i, field := range m.fields {
if field.Key == key {
copy(m.fields[i:], m.fields[i+1:])
m.fields[len(m.fields)-1] = nil
m.fields = m.fields[:len(m.fields)-1]
return
}
}
}
func (m *metric) Copy() telegraf.Metric {
m2 := &metric{
name: m.name,
tags: make([]*telegraf.Tag, len(m.tags)),
fields: make([]*telegraf.Field, len(m.fields)),
tm: m.tm,
tp: m.tp,
aggregate: m.aggregate,
}
for i, tag := range m.tags {
m2.tags[i] = tag
}
for i, field := range m.fields {
m2.fields[i] = field
}
return m2
}
func (m *metric) SetAggregate(b bool) {
m.aggregate = b
}
func (m *metric) IsAggregate() bool {
return m.aggregate
}
func (m *metric) Type() telegraf.ValueType {
return m.mType
}
func (m *metric) Len() int {
// 3 is for 2 spaces surrounding the fields array + newline at the end.
return len(m.name) + len(m.tags) + len(m.fields) + len(m.t) + 3
}
func (m *metric) Serialize() []byte {
tmp := make([]byte, m.Len())
i := 0
i += copy(tmp[i:], m.name)
i += copy(tmp[i:], m.tags)
tmp[i] = ' '
i++
i += copy(tmp[i:], m.fields)
tmp[i] = ' '
i++
i += copy(tmp[i:], m.t)
tmp[i] = '\n'
return tmp
}
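// Illustrative output (taken from the tests later in this diff): a metric
// with one tag and one field serializes as
//
//	cpu,host=localhost float=100001 1480940990034083306\n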
func (m *metric) Split(maxSize int) []telegraf.Metric {
if m.Len() < maxSize {
return []telegraf.Metric{m}
}
var out []telegraf.Metric
// constant number of bytes for each metric (in addition to field bytes)
constant := len(m.name) + len(m.tags) + len(m.t) + 3
// currently selected fields
fields := make([]byte, 0, maxSize)
i := 0
for {
if i >= len(m.fields) {
// hit the end of the field byte slice
if len(fields) > 0 {
out = append(out, copyWith(m.name, m.tags, fields, m.t))
}
break
}
// find the end of the next field
j := indexUnescapedByte(m.fields[i:], ',')
if j == -1 {
j = len(m.fields)
} else {
j += i
}
// if true, then we need to create a metric _not_ including the currently
// selected field
if len(m.fields[i:j])+len(fields)+constant > maxSize {
// if false, then we'll create a metric including the currently
// selected field anyways. This means that the given maxSize is too
// small for a single field to fit.
if len(fields) > 0 {
out = append(out, copyWith(m.name, m.tags, fields, m.t))
}
fields = make([]byte, 0, maxSize)
}
if len(fields) > 0 {
fields = append(fields, ',')
}
fields = append(fields, m.fields[i:j]...)
i = j + 1
}
return out
}
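// Behaviour illustrated by the tests later in this diff: a five-field metric
// splits into 2, 3, and 4 metrics at maxSize 80, 70, and 60 respectively,
// while a metric already shorter than maxSize is returned as-is.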
func (m *metric) Fields() map[string]interface{} {
fieldMap := map[string]interface{}{}
i := 0
for {
if i >= len(m.fields) {
break
}
// end index of field key
i1 := indexUnescapedByte(m.fields[i:], '=')
if i1 == -1 {
break
}
// start index of field value
i2 := i1 + 1
// end index of field value
var i3 int
if m.fields[i:][i2] == '"' {
i3 = indexUnescapedByte(m.fields[i:][i2+1:], '"')
if i3 == -1 {
i3 = len(m.fields[i:])
}
i3 += i2 + 2 // increment index to the comma
} else {
i3 = indexUnescapedByte(m.fields[i:], ',')
if i3 == -1 {
i3 = len(m.fields[i:])
}
}
switch m.fields[i:][i2] {
case '"':
// string field
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = unescape(string(m.fields[i:][i2+1:i3-1]), "fieldval")
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
// number field
switch m.fields[i:][i3-1] {
case 'i':
// integer field
n, err := parseIntBytes(m.fields[i:][i2:i3-1], 10, 64)
if err == nil {
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = n
} else {
// TODO handle error or just ignore field silently?
}
default:
// float field
n, err := parseFloatBytes(m.fields[i:][i2:i3], 64)
if err == nil {
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = n
} else {
// TODO handle error or just ignore field silently?
}
}
case 'T', 't':
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = true
case 'F', 'f':
fieldMap[unescape(string(m.fields[i:][0:i1]), "fieldkey")] = false
default:
// TODO handle unsupported field type
}
i += i3 + 1
}
return fieldMap
}
func (m *metric) Tags() map[string]string {
tagMap := map[string]string{}
if len(m.tags) == 0 {
return tagMap
}
i := 0
for {
// start index of tag key
i0 := indexUnescapedByte(m.tags[i:], ',') + 1
if i0 == 0 {
// didn't find a tag start
break
}
// end index of tag key
i1 := indexUnescapedByte(m.tags[i:], '=')
// start index of tag value
i2 := i1 + 1
// end index of tag value (starting from i2)
i3 := indexUnescapedByte(m.tags[i+i2:], ',')
if i3 == -1 {
tagMap[unescape(string(m.tags[i:][i0:i1]), "tagkey")] = unescape(string(m.tags[i:][i2:]), "tagval")
break
}
tagMap[unescape(string(m.tags[i:][i0:i1]), "tagkey")] = unescape(string(m.tags[i:][i2:i2+i3]), "tagval")
// increment start index for the next tag
i += i2 + i3
}
return tagMap
}
func (m *metric) Name() string {
return unescape(string(m.name), "name")
}
func (m *metric) Time() time.Time {
// assume metric has been verified already and ignore error:
if m.nsec == 0 {
m.nsec, _ = parseIntBytes(m.t, 10, 64)
}
return time.Unix(0, m.nsec)
}
func (m *metric) UnixNano() int64 {
// assume metric has been verified already and ignore error:
if m.nsec == 0 {
m.nsec, _ = parseIntBytes(m.t, 10, 64)
}
return m.nsec
}
func (m *metric) SetName(name string) {
m.hashID = 0
m.name = []byte(nameEscaper.Replace(name))
}
func (m *metric) SetPrefix(prefix string) {
m.hashID = 0
m.name = append([]byte(nameEscaper.Replace(prefix)), m.name...)
}
func (m *metric) SetSuffix(suffix string) {
m.hashID = 0
m.name = append(m.name, []byte(nameEscaper.Replace(suffix))...)
}
func (m *metric) AddTag(key, value string) {
m.RemoveTag(key)
m.tags = append(m.tags, []byte(","+escape(key, "tagkey")+"="+escape(value, "tagval"))...)
}
func (m *metric) HasTag(key string) bool {
i := bytes.Index(m.tags, []byte(escape(key, "tagkey")+"="))
if i == -1 {
return false
}
return true
}
func (m *metric) RemoveTag(key string) {
m.hashID = 0
i := bytes.Index(m.tags, []byte(escape(key, "tagkey")+"="))
if i == -1 {
return
}
tmp := m.tags[0 : i-1]
j := indexUnescapedByte(m.tags[i:], ',')
if j != -1 {
tmp = append(tmp, m.tags[i+j:]...)
}
m.tags = tmp
return
}
func (m *metric) AddField(key string, value interface{}) {
m.fields = append(m.fields, ',')
m.fields = appendField(m.fields, key, value)
}
func (m *metric) HasField(key string) bool {
i := bytes.Index(m.fields, []byte(escape(key, "tagkey")+"="))
if i == -1 {
return false
}
return true
}
func (m *metric) RemoveField(key string) error {
i := bytes.Index(m.fields, []byte(escape(key, "tagkey")+"="))
if i == -1 {
return nil
}
var tmp []byte
if i != 0 {
tmp = m.fields[0 : i-1]
}
j := indexUnescapedByte(m.fields[i:], ',')
if j != -1 {
tmp = append(tmp, m.fields[i+j:]...)
}
if len(tmp) == 0 {
return fmt.Errorf("Metric cannot remove final field: %s", m.fields)
}
m.fields = tmp
return nil
}
func (m *metric) Copy() telegraf.Metric {
return copyWith(m.name, m.tags, m.fields, m.t)
}
func copyWith(name, tags, fields, t []byte) telegraf.Metric {
out := metric{
name: make([]byte, len(name)),
tags: make([]byte, len(tags)),
fields: make([]byte, len(fields)),
t: make([]byte, len(t)),
}
copy(out.name, name)
copy(out.tags, tags)
copy(out.fields, fields)
copy(out.t, t)
return &out
}
func (m *metric) HashID() uint64 {
if m.hashID == 0 {
h := fnv.New64a()
h.Write(m.name)
tags := m.Tags()
tmp := make([]string, len(tags))
i := 0
for k, v := range tags {
tmp[i] = k + v
i++
}
sort.Strings(tmp)
for _, s := range tmp {
h.Write([]byte(s))
}
m.hashID = h.Sum64()
}
return m.hashID
}
func appendField(b []byte, k string, v interface{}) []byte {
if v == nil {
return b
}
b = append(b, []byte(escape(k, "tagkey")+"=")...)
// check popular types first
switch v := v.(type) {
case float64:
b = strconv.AppendFloat(b, v, 'f', -1, 64)
case int64:
b = strconv.AppendInt(b, v, 10)
b = append(b, 'i')
case string:
b = append(b, '"')
b = append(b, []byte(escape(v, "fieldval"))...)
b = append(b, '"')
case bool:
b = strconv.AppendBool(b, v)
case int32:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case int16:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case int8:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case int:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case uint64:
// Cap uints above the maximum int value
var intv int64
if v <= uint64(MaxInt) {
intv = int64(v)
} else {
intv = int64(MaxInt)
}
b = strconv.AppendInt(b, intv, 10)
b = append(b, 'i')
case uint32:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case uint16:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case uint8:
b = strconv.AppendInt(b, int64(v), 10)
b = append(b, 'i')
case uint:
// Cap uints above the maximum int value
var intv int64
if v <= uint(MaxInt) {
intv = int64(v)
} else {
intv = int64(MaxInt)
}
b = strconv.AppendInt(b, intv, 10)
b = append(b, 'i')
case float32:
b = strconv.AppendFloat(b, float64(v), 'f', -1, 32)
case []byte:
b = append(b, v...)
default:
// Can't determine the type, so convert to string
b = append(b, '"')
b = append(b, []byte(escape(fmt.Sprintf("%v", v), "fieldval"))...)
b = append(b, '"')
}
return b
}
// Convert field to a supported type or nil if unconvertible
func convertField(v interface{}) interface{} {
switch v := v.(type) {
case float64:
return v
case int64:
return v
case string:
return v
case bool:
return v
case int:
return int64(v)
case uint:
return uint64(v)
case uint64:
return uint64(v)
case []byte:
return string(v)
case int32:
return int64(v)
case int16:
return int64(v)
case int8:
return int64(v)
case uint32:
return uint64(v)
case uint16:
return uint64(v)
case uint8:
return uint64(v)
case float32:
return float64(v)
default:
return nil
}
}
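// Illustrative conversions performed by convertField (editorial sketch):
//
//	convertField(int32(7))     // -> int64(7)
//	convertField(float32(1.5)) // -> float64(1.5)
//	convertField([]byte("x"))  // -> "x"
//	convertField([]int{1, 2})  // -> nil, unsupported values are dropped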


@@ -1,148 +0,0 @@
package metric
import (
"fmt"
"testing"
"time"
"github.com/influxdata/telegraf"
)
// vars for making sure that the compiler doesn't optimize out the benchmarks:
var (
s string
I interface{}
tags map[string]string
fields map[string]interface{}
)
func BenchmarkNewMetric(b *testing.B) {
var mt telegraf.Metric
for n := 0; n < b.N; n++ {
mt, _ = New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
"test_tag_3": "tag_value_3",
},
map[string]interface{}{
"string_field": "string",
"int_field": int64(1000),
"float_field": float64(2.1),
},
time.Now(),
)
}
s = string(mt.String())
}
func BenchmarkAddTag(b *testing.B) {
var mt telegraf.Metric
mt = &metric{
name: []byte("cpu"),
tags: []byte(",host=localhost"),
fields: []byte("a=101"),
t: []byte("1480614053000000000"),
}
for n := 0; n < b.N; n++ {
mt.AddTag("foo", "bar")
}
s = string(mt.String())
}
func BenchmarkSplit(b *testing.B) {
var mt telegraf.Metric
mt = &metric{
name: []byte("cpu"),
tags: []byte(",host=localhost"),
fields: []byte("a=101,b=10i,c=10101,d=101010,e=42"),
t: []byte("1480614053000000000"),
}
var metrics []telegraf.Metric
for n := 0; n < b.N; n++ {
metrics = mt.Split(60)
}
s = string(metrics[0].String())
}
func BenchmarkTags(b *testing.B) {
for n := 0; n < b.N; n++ {
var mt, _ = New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
"test_tag_3": "tag_value_3",
},
map[string]interface{}{
"string_field": "string",
"int_field": int64(1000),
"float_field": float64(2.1),
},
time.Now(),
)
tags = mt.Tags()
}
s = fmt.Sprint(tags)
}
func BenchmarkFields(b *testing.B) {
for n := 0; n < b.N; n++ {
var mt, _ = New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
"test_tag_3": "tag_value_3",
},
map[string]interface{}{
"string_field": "string",
"int_field": int64(1000),
"float_field": float64(2.1),
},
time.Now(),
)
fields = mt.Fields()
}
s = fmt.Sprint(fields)
}
func BenchmarkString(b *testing.B) {
mt, _ := New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
"test_tag_3": "tag_value_3",
},
map[string]interface{}{
"string_field": "string",
"int_field": int64(1000),
"float_field": float64(2.1),
},
time.Now(),
)
var S string
for n := 0; n < b.N; n++ {
S = mt.String()
}
s = S
}
func BenchmarkSerialize(b *testing.B) {
mt, _ := New("test_metric",
map[string]string{
"test_tag_1": "tag_value_1",
"test_tag_2": "tag_value_2",
"test_tag_3": "tag_value_3",
},
map[string]interface{}{
"string_field": "string",
"int_field": int64(1000),
"float_field": float64(2.1),
},
time.Now(),
)
var B []byte
for n := 0; n < b.N; n++ {
B = mt.Serialize()
}
s = string(B)
}


@@ -1,15 +1,12 @@
package metric
import (
"fmt"
"math"
"regexp"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNewMetric(t *testing.T) {
@@ -24,102 +21,185 @@ func TestNewMetric(t *testing.T) {
"usage_busy": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, telegraf.Untyped, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.UnixNano())
require.Equal(t, "cpu", m.Name())
require.Equal(t, tags, m.Tags())
require.Equal(t, fields, m.Fields())
require.Equal(t, 2, len(m.FieldList()))
require.Equal(t, now, m.Time())
}
func TestNewErrors(t *testing.T) {
// creating a metric with an empty name produces an error:
m, err := New(
"",
map[string]string{
"datacenter": "us-east-1",
"mytag": "foo",
"another": "tag",
},
map[string]interface{}{
"value": float64(1),
},
time.Now(),
)
assert.Error(t, err)
assert.Nil(t, m)
// creating a metric with empty fields produces an error:
m, err = New(
"foobar",
map[string]string{
"datacenter": "us-east-1",
"mytag": "foo",
"another": "tag",
},
map[string]interface{}{},
time.Now(),
)
assert.Error(t, err)
assert.Nil(t, m)
}
func baseMetric() telegraf.Metric {
tags := map[string]string{}
fields := map[string]interface{}{
"value": float64(1),
}
now := time.Now()
m, err := New("cpu", tags, fields, now)
if err != nil {
panic(err)
}
return m
}
func TestHasTag(t *testing.T) {
m := baseMetric()
require.False(t, m.HasTag("host"))
m.AddTag("host", "localhost")
require.True(t, m.HasTag("host"))
m.RemoveTag("host")
require.False(t, m.HasTag("host"))
}
func TestAddTagOverwrites(t *testing.T) {
m := baseMetric()
m.AddTag("host", "localhost")
m.AddTag("host", "example.org")
value, ok := m.GetTag("host")
require.True(t, ok)
require.Equal(t, "example.org", value)
require.Equal(t, 1, len(m.TagList()))
}
func TestRemoveTagNoEffectOnMissingTags(t *testing.T) {
m := baseMetric()
m.RemoveTag("foo")
m.AddTag("a", "x")
m.RemoveTag("foo")
m.RemoveTag("bar")
value, ok := m.GetTag("a")
require.True(t, ok)
require.Equal(t, "x", value)
}
func TestGetTag(t *testing.T) {
m := baseMetric()
value, ok := m.GetTag("host")
require.False(t, ok)
m.AddTag("host", "localhost")
value, ok = m.GetTag("host")
require.True(t, ok)
require.Equal(t, "localhost", value)
m.RemoveTag("host")
value, ok = m.GetTag("host")
require.False(t, ok)
}
func TestHasField(t *testing.T) {
m := baseMetric()
require.False(t, m.HasField("x"))
m.AddField("x", 42.0)
require.True(t, m.HasField("x"))
m.RemoveField("x")
require.False(t, m.HasField("x"))
}
func TestAddFieldOverwrites(t *testing.T) {
m := baseMetric()
m.AddField("value", 1.0)
m.AddField("value", 42.0)
value, ok := m.GetField("value")
require.True(t, ok)
require.Equal(t, 42.0, value)
}
func TestAddFieldChangesType(t *testing.T) {
m := baseMetric()
m.AddField("value", 1.0)
m.AddField("value", "xyzzy")
value, ok := m.GetField("value")
require.True(t, ok)
require.Equal(t, "xyzzy", value)
}
func TestRemoveFieldNoEffectOnMissingFields(t *testing.T) {
m := baseMetric()
m.RemoveField("foo")
m.AddField("a", "x")
m.RemoveField("foo")
m.RemoveField("bar")
value, ok := m.GetField("a")
require.True(t, ok)
require.Equal(t, "x", value)
}
func TestGetField(t *testing.T) {
m := baseMetric()
value, ok := m.GetField("foo")
require.False(t, ok)
m.AddField("foo", "bar")
value, ok = m.GetField("foo")
require.True(t, ok)
require.Equal(t, "bar", value)
m.RemoveTag("foo")
value, ok = m.GetTag("foo")
require.False(t, ok)
}
func TestTagList_Sorted(t *testing.T) {
m := baseMetric()
m.AddTag("b", "y")
m.AddTag("c", "z")
m.AddTag("a", "x")
taglist := m.TagList()
require.Equal(t, "a", taglist[0].Key)
require.Equal(t, "b", taglist[1].Key)
require.Equal(t, "c", taglist[2].Key)
}
func TestEquals(t *testing.T) {
now := time.Now()
m1, err := New("cpu",
map[string]string{
"host": "localhost",
},
map[string]interface{}{
"value": 42.0,
},
now,
)
require.NoError(t, err)
m2, err := New("cpu",
map[string]string{
"host": "localhost",
},
map[string]interface{}{
"value": 42.0,
},
now,
)
require.NoError(t, err)
lhs := m1.(*metric)
require.Equal(t, lhs, m2)
m3 := m2.Copy()
require.Equal(t, lhs, m3)
m3.AddTag("a", "x")
require.NotEqual(t, lhs, m3)
}
func TestHashID(t *testing.T) {
@@ -170,477 +250,88 @@ func TestHashID_Consistency(t *testing.T) {
)
hash := m.HashID()
m2, _ := New(
"cpu",
map[string]string{
"datacenter": "us-east-1",
"mytag": "foo",
"another": "tag",
},
map[string]interface{}{
"value": float64(1),
},
time.Now(),
)
assert.Equal(t, hash, m2.HashID())
m3 := m.Copy()
assert.Equal(t, m2.HashID(), m3.HashID())
}
func TestHashID_Delimiting(t *testing.T) {
m1, _ := New(
"cpu",
map[string]string{
"a": "x",
"b": "y",
"c": "z",
},
map[string]interface{}{
"value": float64(1),
},
time.Now(),
)
m2, _ := New(
"cpu",
map[string]string{
"a": "xbycz",
},
map[string]interface{}{
"value": float64(1),
},
time.Now(),
)
assert.NotEqual(t, m1.HashID(), m2.HashID())
}
func TestSetName(t *testing.T) {
m := baseMetric()
m.SetName("foo")
require.Equal(t, "foo", m.Name())
}
func TestAddPrefix(t *testing.T) {
m := baseMetric()
m.AddPrefix("foo_")
require.Equal(t, "foo_cpu", m.Name())
m.AddPrefix("foo_")
require.Equal(t, "foo_foo_cpu", m.Name())
}
func TestAddSuffix(t *testing.T) {
m := baseMetric()
m.AddSuffix("_foo")
require.Equal(t, "cpu_foo", m.Name())
m.AddSuffix("_foo")
require.Equal(t, "cpu_foo_foo", m.Name())
}
func TestNewMetric_NameModifiers(t *testing.T) {
now := time.Now()
tags := map[string]string{}
fields := map[string]interface{}{
"value": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
hash := m.HashID()
suffix := fmt.Sprintf(" value=1 %d\n", now.UnixNano())
assert.Equal(t, "cpu"+suffix, m.String())
m.SetPrefix("pre_")
assert.NotEqual(t, hash, m.HashID())
hash = m.HashID()
assert.Equal(t, "pre_cpu"+suffix, m.String())
m.SetSuffix("_post")
assert.NotEqual(t, hash, m.HashID())
hash = m.HashID()
assert.Equal(t, "pre_cpu_post"+suffix, m.String())
m.SetName("mem")
assert.NotEqual(t, hash, m.HashID())
assert.Equal(t, "mem"+suffix, m.String())
}
func TestNewMetric_FieldModifiers(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"value": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.True(t, m.HasField("value"))
assert.False(t, m.HasField("foo"))
m.AddField("newfield", "foo")
assert.True(t, m.HasField("newfield"))
assert.NoError(t, m.RemoveField("newfield"))
assert.False(t, m.HasField("newfield"))
// don't allow user to remove all fields:
assert.Error(t, m.RemoveField("value"))
m.AddField("value2", int64(101))
assert.NoError(t, m.RemoveField("value"))
assert.False(t, m.HasField("value"))
}
func TestNewMetric_Fields(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(1),
"int": int64(1),
"bool": true,
"false": false,
"string": "test",
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t, fields, m.Fields())
}
func TestNewMetric_Time(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(1),
"int": int64(1),
"bool": true,
"false": false,
"string": "test",
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
m = m.Copy()
m2 := m.Copy()
assert.Equal(t, now.UnixNano(), m.Time().UnixNano())
assert.Equal(t, now.UnixNano(), m2.UnixNano())
}
func TestNewMetric_Copy(t *testing.T) {
now := time.Now()
tags := map[string]string{}
fields := map[string]interface{}{
"float": float64(1),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
m2 := m.Copy()
assert.Equal(t,
fmt.Sprintf("cpu float=1 %d\n", now.UnixNano()),
m.String())
m.AddTag("host", "localhost")
assert.Equal(t,
fmt.Sprintf("cpu,host=localhost float=1 %d\n", now.UnixNano()),
m.String())
assert.Equal(t,
fmt.Sprintf("cpu float=1 %d\n", now.UnixNano()),
m2.String())
}
func TestNewMetric_AllTypes(t *testing.T) {
now := time.Now()
tags := map[string]string{}
fields := map[string]interface{}{
"float64": float64(1),
"float32": float32(1),
"int64": int64(1),
"int32": int32(1),
"int16": int16(1),
"int8": int8(1),
"int": int(1),
"uint64": uint64(1),
"uint32": uint32(1),
"uint16": uint16(1),
"uint8": uint8(1),
"uint": uint(1),
"bytes": []byte("foo"),
"nil": nil,
"maxuint64": uint64(MaxInt) + 10,
"maxuint": uint(MaxInt) + 10,
"unsupported": []int{1, 2},
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Contains(t, m.String(), "float64=1")
assert.Contains(t, m.String(), "float32=1")
assert.Contains(t, m.String(), "int64=1i")
assert.Contains(t, m.String(), "int32=1i")
assert.Contains(t, m.String(), "int16=1i")
assert.Contains(t, m.String(), "int8=1i")
assert.Contains(t, m.String(), "int=1i")
assert.Contains(t, m.String(), "uint64=1i")
assert.Contains(t, m.String(), "uint32=1i")
assert.Contains(t, m.String(), "uint16=1i")
assert.Contains(t, m.String(), "uint8=1i")
assert.Contains(t, m.String(), "uint=1i")
assert.NotContains(t, m.String(), "nil")
assert.Contains(t, m.String(), fmt.Sprintf("maxuint64=%di", MaxInt))
assert.Contains(t, m.String(), fmt.Sprintf("maxuint=%di", MaxInt))
}
func TestIndexUnescapedByte(t *testing.T) {
tests := []struct {
in []byte
b byte
expected int
}{
{
in: []byte(`foobar`),
b: 'b',
expected: 3,
},
{
in: []byte(`foo\bar`),
b: 'b',
expected: -1,
},
{
in: []byte(`foo\\bar`),
b: 'b',
expected: 5,
},
{
in: []byte(`foobar`),
b: 'f',
expected: 0,
},
{
in: []byte(`foobar`),
b: 'r',
expected: 5,
},
{
in: []byte(`\foobar`),
b: 'f',
expected: -1,
},
}
for _, test := range tests {
got := indexUnescapedByte(test.in, test.b)
assert.Equal(t, test.expected, got)
}
}
func TestNewGaugeMetric(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
"value": float64(42),
}
m, err := New("cpu", tags, fields, now, telegraf.Gauge)
assert.NoError(t, err)
assert.Equal(t, telegraf.Gauge, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
func TestNewCounterMetric(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
"datacenter": "us-east-1",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
}
m, err := New("cpu", tags, fields, now, telegraf.Counter)
assert.NoError(t, err)
assert.Equal(t, telegraf.Counter, m.Type())
assert.Equal(t, tags, m.Tags())
assert.Equal(t, fields, m.Fields())
assert.Equal(t, "cpu", m.Name())
assert.Equal(t, now, m.Time())
assert.Equal(t, now.UnixNano(), m.UnixNano())
}
// test splitting metric into various max lengths
func TestSplitMetric(t *testing.T) {
now := time.Unix(0, 1480940990034083306)
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(100001),
"int": int64(100001),
"bool": true,
"false": false,
"string": "test",
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
split80 := m.Split(80)
assert.Len(t, split80, 2)
split70 := m.Split(70)
assert.Len(t, split70, 3)
split60 := m.Split(60)
assert.Len(t, split60, 4)
}
// test splitting metric into various max lengths
// use a simple regex check to verify that the split metrics are valid
func TestSplitMetric_RegexVerify(t *testing.T) {
now := time.Unix(0, 1480940990034083306)
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"foo": float64(98934259085),
"bar": float64(19385292),
"number": float64(19385292),
"another": float64(19385292),
"n": float64(19385292),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
// verification regex
re := regexp.MustCompile(`cpu,host=localhost \w+=\d+(,\w+=\d+)* 1480940990034083306`)
split90 := m.Split(90)
assert.Len(t, split90, 2)
for _, splitM := range split90 {
assert.True(t, re.Match(splitM.Serialize()), splitM.String())
}
split70 := m.Split(70)
assert.Len(t, split70, 3)
for _, splitM := range split70 {
assert.True(t, re.Match(splitM.Serialize()), splitM.String())
}
split20 := m.Split(20)
assert.Len(t, split20, 5)
for _, splitM := range split20 {
assert.True(t, re.Match(splitM.Serialize()), splitM.String())
}
}
// test splitting metric even when given length is shorter than
// shortest possible length
// Split should split metric as short as possible, ie, 1 field per metric
func TestSplitMetric_TooShort(t *testing.T) {
now := time.Unix(0, 1480940990034083306)
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(100001),
"int": int64(100001),
"bool": true,
"false": false,
"string": "test",
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
split := m.Split(10)
assert.Len(t, split, 5)
strings := make([]string, 5)
for i, splitM := range split {
strings[i] = splitM.String()
}
assert.Contains(t, strings, "cpu,host=localhost float=100001 1480940990034083306\n")
assert.Contains(t, strings, "cpu,host=localhost int=100001i 1480940990034083306\n")
assert.Contains(t, strings, "cpu,host=localhost bool=true 1480940990034083306\n")
assert.Contains(t, strings, "cpu,host=localhost false=false 1480940990034083306\n")
assert.Contains(t, strings, "cpu,host=localhost string=\"test\" 1480940990034083306\n")
}
func TestSplitMetric_NoOp(t *testing.T) {
now := time.Unix(0, 1480940990034083306)
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(100001),
"int": int64(100001),
"bool": true,
"false": false,
"string": "test",
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
split := m.Split(1000)
assert.Len(t, split, 1)
assert.Equal(t, m, split[0])
}
func TestSplitMetric_OneField(t *testing.T) {
now := time.Unix(0, 1480940990034083306)
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"float": float64(100001),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", m.String())
split := m.Split(1000)
assert.Len(t, split, 1)
assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
split = m.Split(1)
assert.Len(t, split, 1)
assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
split = m.Split(40)
assert.Len(t, split, 1)
assert.Equal(t, "cpu,host=localhost float=100001 1480940990034083306\n", split[0].String())
}
func TestNewMetricAggregate(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
assert.False(t, m.IsAggregate())
m.SetAggregate(true)
assert.True(t, m.IsAggregate())
}
func TestNewMetricPoint(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
p := m.Point()
assert.Equal(t, fields, m.Fields())
assert.Equal(t, fields, p.Fields())
assert.Equal(t, "cpu", p.Name())
}
func TestNewMetricString(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"usage_idle": float64(99),
}
m, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
lineProto := fmt.Sprintf("cpu,host=localhost usage_idle=99 %d\n",
now.UnixNano())
assert.Equal(t, lineProto, m.String())
}
func TestNewMetricFailNaN(t *testing.T) {
now := time.Now()
tags := map[string]string{
"host": "localhost",
}
fields := map[string]interface{}{
"usage_idle": math.NaN(),
}
_, err := New("cpu", tags, fields, now)
assert.NoError(t, err)
}
func TestCopyAggreate(t *testing.T) {
m1 := baseMetric()
m1.SetAggregate(true)
m2 := m1.Copy()
assert.True(t, m2.IsAggregate())
}


@@ -1,627 +0,0 @@
package metric
import (
"bytes"
"errors"
"fmt"
"time"
"github.com/influxdata/telegraf"
)
var (
ErrInvalidNumber = errors.New("invalid number")
)
const (
// the number of characters for the largest possible int64 (9223372036854775807)
maxInt64Digits = 19
// the number of characters for the smallest possible int64 (-9223372036854775808)
minInt64Digits = 20
// the number of characters required for the largest float64 before a range check
// would occur during parsing
maxFloat64Digits = 25
// the number of characters required for smallest float64 before a range check occur
// would occur during parsing
minFloat64Digits = 27
MaxKeyLength = 65535
)
// The following constants allow us to specify which state to move to
// next, when scanning sections of a Point.
const (
tagKeyState = iota
tagValueState
fieldsState
)
func Parse(buf []byte) ([]telegraf.Metric, error) {
return ParseWithDefaultTime(buf, time.Now())
}
func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
if len(buf) <= 6 {
return []telegraf.Metric{}, makeError("buffer too short", buf, 0)
}
metrics := make([]telegraf.Metric, 0, bytes.Count(buf, []byte("\n"))+1)
var errStr string
i := 0
for {
j := bytes.IndexByte(buf[i:], '\n')
if j == -1 {
break
}
if len(buf[i:i+j]) < 2 {
i += j + 1 // increment i past the previous newline
continue
}
m, err := parseMetric(buf[i:i+j], t)
if err != nil {
i += j + 1 // increment i past the previous newline
errStr += " " + err.Error()
continue
}
i += j + 1 // increment i past the previous newline
metrics = append(metrics, m)
}
if len(errStr) > 0 {
return metrics, fmt.Errorf(errStr)
}
return metrics, nil
}
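// Illustrative use of the default timestamp (editorial sketch): lines
// without a trailing timestamp all receive the same default time, which the
// tests for this parser rely on.
//
//	ms, _ := Parse([]byte("cpu,host=a value=1\ncpu,host=b value=2\n"))
//	// ms[0].Time().Equal(ms[1].Time()) == true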
func parseMetric(buf []byte, defaultTime time.Time) (telegraf.Metric, error) {
var dTime string
// scan the first block which is measurement[,tag1=value1,tag2=value=2...]
pos, key, err := scanKey(buf, 0)
if err != nil {
return nil, err
}
// measurement name is required
if len(key) == 0 {
return nil, fmt.Errorf("missing measurement")
}
if len(key) > MaxKeyLength {
return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
}
// scan the second block is which is field1=value1[,field2=value2,...]
pos, fields, err := scanFields(buf, pos)
if err != nil {
return nil, err
}
// at least one field is required
if len(fields) == 0 {
return nil, fmt.Errorf("missing fields")
}
// scan the last block which is an optional integer timestamp
pos, ts, err := scanTime(buf, pos)
if err != nil {
return nil, err
}
m := &metric{
fields: fields,
t: ts,
}
// parse out the measurement name
// namei is the index at which the "name" ends
namei := indexUnescapedByte(key, ',')
if namei < 1 {
// no tags
m.name = key
} else {
m.name = key[0:namei]
m.tags = key[namei:]
}
if len(m.t) == 0 {
if len(dTime) == 0 {
dTime = fmt.Sprint(defaultTime.UnixNano())
}
// use default time
m.t = []byte(dTime)
}
// here we copy on return because this allows us to later call
// AddTag, AddField, RemoveTag, RemoveField, etc. without worrying about
// modifying 'tag' bytes having an affect on 'field' bytes, for example.
return m.Copy(), nil
}
// scanKey scans buf starting at i for the measurement and tag portion of the point.
// It returns the ending position and the byte slice of key within buf. If there
// are tags, they will be sorted if they are not already.
func scanKey(buf []byte, i int) (int, []byte, error) {
start := skipWhitespace(buf, i)
i = start
// First scan the Point's measurement.
state, i, err := scanMeasurement(buf, i)
if err != nil {
return i, buf[start:i], err
}
// Optionally scan tags if needed.
if state == tagKeyState {
i, err = scanTags(buf, i)
if err != nil {
return i, buf[start:i], err
}
}
return i, buf[start:i], nil
}
// scanMeasurement examines the measurement part of a Point, returning
// the next state to move to, and the current location in the buffer.
func scanMeasurement(buf []byte, i int) (int, int, error) {
// Check first byte of measurement, anything except a comma is fine.
// It can't be a space, since whitespace is stripped prior to this
// function call.
if i >= len(buf) || buf[i] == ',' {
return -1, i, makeError("missing measurement", buf, i)
}
for {
i++
if i >= len(buf) {
// cpu
return -1, i, makeError("missing fields", buf, i)
}
if buf[i-1] == '\\' {
// Skip character (it's escaped).
continue
}
// Unescaped comma; move onto scanning the tags.
if buf[i] == ',' {
return tagKeyState, i + 1, nil
}
// Unescaped space; move onto scanning the fields.
if buf[i] == ' ' {
// cpu value=1.0
return fieldsState, i, nil
}
}
}
// scanTags examines all the tags in a Point, keeping track of and
// returning the updated indices slice, number of commas and location
// in buf where to start examining the Point fields.
func scanTags(buf []byte, i int) (int, error) {
var (
err error
state = tagKeyState
)
for {
switch state {
case tagKeyState:
i, err = scanTagsKey(buf, i)
state = tagValueState // tag value always follows a tag key
case tagValueState:
state, i, err = scanTagsValue(buf, i)
case fieldsState:
return i, nil
}
if err != nil {
return i, err
}
}
}
// scanTagsKey scans each character in a tag key.
func scanTagsKey(buf []byte, i int) (int, error) {
// First character of the key.
if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' {
// cpu,{'', ' ', ',', '='}
return i, makeError("missing tag key", buf, i)
}
// Examine each character in the tag key until we hit an unescaped
// equals (the tag value), or we hit an error (i.e., unescaped
// space or comma).
for {
i++
// Either we reached the end of the buffer or we hit an
// unescaped comma or space.
if i >= len(buf) ||
((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') {
// cpu,tag{'', ' ', ','}
return i, makeError("missing tag value", buf, i)
}
if buf[i] == '=' && buf[i-1] != '\\' {
// cpu,tag=
return i + 1, nil
}
}
}
// scanTagsValue scans each character in a tag value.
func scanTagsValue(buf []byte, i int) (int, int, error) {
// Tag value cannot be empty.
if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' {
// cpu,tag={',', ' '}
return -1, i, makeError("missing tag value", buf, i)
}
// Examine each character in the tag value until we hit an unescaped
// comma (move onto next tag key), an unescaped space (move onto
// fields), or we error out.
for {
i++
if i >= len(buf) {
// cpu,tag=value
return -1, i, makeError("missing fields", buf, i)
}
// An unescaped equals sign is an invalid tag value.
if buf[i] == '=' && buf[i-1] != '\\' {
// cpu,tag={'=', 'fo=o'}
return -1, i, makeError("invalid tag format", buf, i)
}
if buf[i] == ',' && buf[i-1] != '\\' {
// cpu,tag=foo,
return tagKeyState, i + 1, nil
}
// cpu,tag=foo value=1.0
// cpu, tag=foo\= value=1.0
if buf[i] == ' ' && buf[i-1] != '\\' {
return fieldsState, i, nil
}
}
}
// scanFields scans buf, starting at i for the fields section of a point. It returns
// the ending position and the byte slice of the fields within buf
func scanFields(buf []byte, i int) (int, []byte, error) {
start := skipWhitespace(buf, i)
i = start
quoted := false
// tracks how many '=' we've seen
equals := 0
// tracks how many commas we've seen
commas := 0
for {
// reached the end of buf?
if i >= len(buf) {
break
}
// escaped characters?
if buf[i] == '\\' && i+1 < len(buf) {
i += 2
continue
}
// If the value is quoted, scan until we get to the end quote
// Only quote values in the field value since quotes are not significant
// in the field key
if buf[i] == '"' && equals > commas {
quoted = !quoted
i++
continue
}
// If we see an =, ensure that there is at least one char before and after it
if buf[i] == '=' && !quoted {
equals++
// check for "... =123" but allow "a\ =123"
if buf[i-1] == ' ' && buf[i-2] != '\\' {
return i, buf[start:i], makeError("missing field key", buf, i)
}
// check for "...a=123,=456" but allow "a=123,a\,=456"
if buf[i-1] == ',' && buf[i-2] != '\\' {
return i, buf[start:i], makeError("missing field key", buf, i)
}
// check for "... value="
if i+1 >= len(buf) {
return i, buf[start:i], makeError("missing field value", buf, i)
}
// check for "... value=,value2=..."
if buf[i+1] == ',' || buf[i+1] == ' ' {
return i, buf[start:i], makeError("missing field value", buf, i)
}
if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
var err error
i, err = scanNumber(buf, i+1)
if err != nil {
return i, buf[start:i], err
}
continue
}
// If next byte is not a double-quote, the value must be a boolean
if buf[i+1] != '"' {
var err error
i, _, err = scanBoolean(buf, i+1)
if err != nil {
return i, buf[start:i], err
}
continue
}
}
if buf[i] == ',' && !quoted {
commas++
}
// reached end of block?
if buf[i] == ' ' && !quoted {
break
}
i++
}
if quoted {
return i, buf[start:i], makeError("unbalanced quotes", buf, i)
}
// check that all field sections had key and values (e.g. prevent "a=1,b")
if equals == 0 || commas != equals-1 {
return i, buf[start:i], makeError("invalid field format", buf, i)
}
return i, buf[start:i], nil
}
// scanTime scans buf, starting at i, for the time section of a point. It
// returns the ending position, the byte slice of the timestamp within buf,
// and an error if the timestamp is not in the correct numeric format.
func scanTime(buf []byte, i int) (int, []byte, error) {
start := skipWhitespace(buf, i)
i = start
for {
// reached the end of buf?
if i >= len(buf) {
break
}
// Reached end of block or trailing whitespace?
if buf[i] == '\n' || buf[i] == ' ' {
break
}
// Handle negative timestamps
if i == start && buf[i] == '-' {
i++
continue
}
// Timestamps should be integers, make sure they are so we don't need
// to actually parse the timestamp until needed.
if buf[i] < '0' || buf[i] > '9' {
return i, buf[start:i], makeError("invalid timestamp", buf, i)
}
i++
}
return i, buf[start:i], nil
}
func isNumeric(b byte) bool {
return (b >= '0' && b <= '9') || b == '.'
}
// scanNumber returns the end position within buf, starting at i, after
// scanning over buf for an integer or float. It returns an
// error if an invalid number is scanned.
func scanNumber(buf []byte, i int) (int, error) {
start := i
var isInt bool
// Is negative number?
if i < len(buf) && buf[i] == '-' {
i++
// There must be more characters now, as just '-' is illegal.
if i == len(buf) {
return i, ErrInvalidNumber
}
}
// tracks whether we've seen a decimal point
decimal := false
// indicates the number is float in scientific notation
scientific := false
for {
if i >= len(buf) {
break
}
if buf[i] == ',' || buf[i] == ' ' {
break
}
if buf[i] == 'i' && i > start && !isInt {
isInt = true
i++
continue
}
if buf[i] == '.' {
// Can't have more than 1 decimal (e.g. 1.1.1 should fail)
if decimal {
return i, ErrInvalidNumber
}
decimal = true
}
// `e` is valid for floats but not as the first char
if i > start && (buf[i] == 'e' || buf[i] == 'E') {
scientific = true
i++
continue
}
// + and - are only valid at this point if they follow an e (scientific notation)
if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
i++
continue
}
// NaN is an unsupported value
if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
return i, ErrInvalidNumber
}
if !isNumeric(buf[i]) {
return i, ErrInvalidNumber
}
i++
}
if isInt && (decimal || scientific) {
return i, ErrInvalidNumber
}
numericDigits := i - start
if isInt {
numericDigits--
}
if decimal {
numericDigits--
}
if buf[start] == '-' {
numericDigits--
}
if numericDigits == 0 {
return i, ErrInvalidNumber
}
// It's more common that numbers will be within min/max range for their type but we need to prevent
// out of range numbers from being parsed successfully. This uses some simple heuristics to decide
// if we should parse the number to the actual type. It does not do it all the time because it incurs
// extra allocations and we end up converting the type again when writing points to disk.
if isInt {
// Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
if buf[i-1] != 'i' {
return i, ErrInvalidNumber
}
// Parse the int to check bounds the number of digits could be larger than the max range
// We subtract 1 from the index to remove the `i` from our tests
if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil {
return i, makeError(fmt.Sprintf("unable to parse integer %s: %s", buf[start:i-1], err), buf, i)
}
}
} else {
// Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
if _, err := parseFloatBytes(buf[start:i], 64); err != nil {
return i, makeError("invalid float", buf, i)
}
}
}
return i, nil
}
// scanBoolean returns the end position within buf, starting at i, after
// scanning over buf for a boolean. Valid values for a boolean are
// t, T, true, TRUE, f, F, false, FALSE. It returns an error if an invalid
// boolean is scanned.
func scanBoolean(buf []byte, i int) (int, []byte, error) {
start := i
if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') {
return i, buf[start:i], makeError("invalid value", buf, i)
}
i++
for {
if i >= len(buf) {
break
}
if buf[i] == ',' || buf[i] == ' ' {
break
}
i++
}
// Single char bool (t, T, f, F) is ok
if i-start == 1 {
return i, buf[start:i], nil
}
// length must be 4 for true or TRUE
if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 {
return i, buf[start:i], makeError("invalid boolean", buf, i)
}
// length must be 5 for false or FALSE
if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 {
return i, buf[start:i], makeError("invalid boolean", buf, i)
}
// Otherwise
valid := false
switch buf[start] {
case 't':
valid = bytes.Equal(buf[start:i], []byte("true"))
case 'f':
valid = bytes.Equal(buf[start:i], []byte("false"))
case 'T':
valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True"))
case 'F':
valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False"))
}
if !valid {
return i, buf[start:i], makeError("invalid boolean", buf, i)
}
return i, buf[start:i], nil
}
// skipWhitespace returns the end position within buf, starting at i after
// scanning over spaces in tags
func skipWhitespace(buf []byte, i int) int {
for i < len(buf) {
if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 {
break
}
i++
}
return i
}
// makeError is a helper function for making a metric parsing error.
// reason is the reason that the error occurred.
// buf should be the current buffer we are parsing.
// i is the current index, to give some context on where in the buffer we are.
func makeError(reason string, buf []byte, i int) error {
return fmt.Errorf("metric parsing error, reason: [%s], buffer: [%s], index: [%d]",
reason, buf, i)
}
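// For example (derived from the format string above),
// makeError("missing fields", []byte("cpu"), 3) produces:
//
//	metric parsing error, reason: [missing fields], buffer: [cpu], index: [3]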


@@ -1,355 +0,0 @@
package metric
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
const trues = `booltest b=T
booltest b=t
booltest b=True
booltest b=TRUE
booltest b=true
`
const falses = `booltest b=F
booltest b=f
booltest b=False
booltest b=FALSE
booltest b=false
`
const withEscapes = `w\,\ eather,host=local temp=99 1465839830100400200
w\,eather,host=local temp=99 1465839830100400200
weather,location=us\,midwest temperature=82 1465839830100400200
weather,location=us-midwest temp\=rature=82 1465839830100400200
weather,location\ place=us-midwest temperature=82 1465839830100400200
weather,location=us-midwest temperature="too\"hot\"" 1465839830100400200
`
const withTimestamps = `cpu usage=99 1480595849000000000
cpu usage=99 1480595850000000000
cpu usage=99 1480595851700030000
cpu usage=99 1480595852000000300
`
const sevenMetrics = `cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
cpu,host=foo,datacenter=us-east idle=99,busy=1i,b=true,s="string"
`
// some metrics are invalid
const someInvalid = `cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu3, host=foo,datacenter=us-east usage_idle=99,usage_busy=1
cpu,cpu=cpu4 , usage_idle=99,usage_busy=1
cpu 1480595852000000300
cpu usage=99 1480595852foobar300
cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
`
func TestParse(t *testing.T) {
start := time.Now()
metrics, err := Parse([]byte(sevenMetrics))
assert.NoError(t, err)
assert.Len(t, metrics, 7)
// all metrics parsed together w/o a timestamp should have the same time.
firstTime := metrics[0].Time()
for _, m := range metrics {
assert.Equal(t,
map[string]interface{}{
"idle": float64(99),
"busy": int64(1),
"b": true,
"s": "string",
},
m.Fields(),
)
assert.Equal(t,
map[string]string{
"host": "foo",
"datacenter": "us-east",
},
m.Tags(),
)
assert.True(t, m.Time().After(start))
assert.True(t, m.Time().Equal(firstTime))
}
}
func TestParseErrors(t *testing.T) {
start := time.Now()
metrics, err := Parse([]byte(someInvalid))
assert.Error(t, err)
assert.Len(t, metrics, 4)
// all metrics parsed together w/o a timestamp should have the same time.
firstTime := metrics[0].Time()
for _, m := range metrics {
assert.Equal(t,
map[string]interface{}{
"usage_idle": float64(99),
"usage_busy": float64(1),
},
m.Fields(),
)
assert.Equal(t,
map[string]string{
"host": "foo",
"datacenter": "us-east",
},
m.Tags(),
)
assert.True(t, m.Time().After(start))
assert.True(t, m.Time().Equal(firstTime))
}
}
func TestParseWithTimestamps(t *testing.T) {
metrics, err := Parse([]byte(withTimestamps))
assert.NoError(t, err)
assert.Len(t, metrics, 4)
expectedTimestamps := []time.Time{
time.Unix(0, 1480595849000000000),
time.Unix(0, 1480595850000000000),
time.Unix(0, 1480595851700030000),
time.Unix(0, 1480595852000000300),
}
// all metrics parsed together w/o a timestamp should have the same time.
for i, m := range metrics {
assert.Equal(t,
map[string]interface{}{
"usage": float64(99),
},
m.Fields(),
)
assert.True(t, m.Time().Equal(expectedTimestamps[i]))
}
}
func TestParseEscapes(t *testing.T) {
metrics, err := Parse([]byte(withEscapes))
assert.NoError(t, err)
assert.Len(t, metrics, 6)
tests := []struct {
name string
fields map[string]interface{}
tags map[string]string
}{
{
name: `w, eather`,
fields: map[string]interface{}{"temp": float64(99)},
tags: map[string]string{"host": "local"},
},
{
name: `w,eather`,
fields: map[string]interface{}{"temp": float64(99)},
tags: map[string]string{"host": "local"},
},
{
name: `weather`,
fields: map[string]interface{}{"temperature": float64(82)},
tags: map[string]string{"location": `us,midwest`},
},
{
name: `weather`,
fields: map[string]interface{}{`temp=rature`: float64(82)},
tags: map[string]string{"location": `us-midwest`},
},
{
name: `weather`,
fields: map[string]interface{}{"temperature": float64(82)},
tags: map[string]string{`location place`: `us-midwest`},
},
{
name: `weather`,
fields: map[string]interface{}{`temperature`: `too"hot"`},
tags: map[string]string{"location": `us-midwest`},
},
}
for i, test := range tests {
assert.Equal(t, test.name, metrics[i].Name())
assert.Equal(t, test.fields, metrics[i].Fields())
assert.Equal(t, test.tags, metrics[i].Tags())
}
}
func TestParseTrueBooleans(t *testing.T) {
metrics, err := Parse([]byte(trues))
assert.NoError(t, err)
assert.Len(t, metrics, 5)
for _, metric := range metrics {
assert.Equal(t, "booltest", metric.Name())
assert.Equal(t, true, metric.Fields()["b"])
}
}
func TestParseFalseBooleans(t *testing.T) {
metrics, err := Parse([]byte(falses))
assert.NoError(t, err)
assert.Len(t, metrics, 5)
for _, metric := range metrics {
assert.Equal(t, "booltest", metric.Name())
assert.Equal(t, false, metric.Fields()["b"])
}
}
func TestParsePointBadNumber(t *testing.T) {
for _, tt := range []string{
"cpu v=- ",
"cpu v=-i ",
"cpu v=-. ",
"cpu v=. ",
"cpu v=1.0i ",
"cpu v=1ii ",
"cpu v=1a ",
"cpu v=-e-e-e ",
"cpu v=42+3 ",
"cpu v= ",
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
func TestParseTagsMissingParts(t *testing.T) {
for _, tt := range []string{
`cpu,host`,
`cpu,host,`,
`cpu,host=`,
`cpu,f=oo=bar value=1`,
`cpu,host value=1i`,
`cpu,host=serverA,region value=1i`,
`cpu,host=serverA,region= value=1i`,
`cpu,host=serverA,region=,zone=us-west value=1i`,
`cpu, value=1`,
`cpu, ,,`,
`cpu,,,`,
`cpu,host=serverA,=us-east value=1i`,
`cpu,host=serverAa\,,=us-east value=1i`,
`cpu,host=serverA\,,=us-east value=1i`,
`cpu, =serverA value=1i`,
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
func TestParsePointWhitespace(t *testing.T) {
for _, tt := range []string{
`cpu value=1.0 1257894000000000000`,
`cpu value=1.0 1257894000000000000`,
`cpu value=1.0 1257894000000000000`,
`cpu value=1.0 1257894000000000000 `,
} {
m, err := Parse([]byte(tt + "\n"))
assert.NoError(t, err, tt)
assert.Equal(t, "cpu", m[0].Name())
assert.Equal(t, map[string]interface{}{"value": float64(1)}, m[0].Fields())
}
}
func TestParsePointInvalidFields(t *testing.T) {
for _, tt := range []string{
"test,foo=bar a=101,=value",
"test,foo=bar =value",
"test,foo=bar a=101,key=",
"test,foo=bar key=",
`test,foo=bar a=101,b="foo`,
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
func TestParsePointNoFields(t *testing.T) {
for _, tt := range []string{
"cpu_load_short,host=server01,region=us-west",
"very_long_measurement_name",
"cpu,host==",
"============",
"cpu",
"cpu\n\n\n\n\n\n\n",
" ",
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
// a b=1 << this is the shortest possible metric
// any shorter is just ignored
func TestParseBufTooShort(t *testing.T) {
for _, tt := range []string{
"",
"a",
"a ",
"a b=",
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
func TestParseInvalidBooleans(t *testing.T) {
for _, tt := range []string{
"test b=tru",
"test b=fals",
"test b=faLse",
"test q=foo",
"test b=lambchops",
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
func TestParseInvalidNumbers(t *testing.T) {
for _, tt := range []string{
"test b=-",
"test b=1.1.1",
"test b=nan",
"test b=9i10",
"test b=9999999999999999999i",
} {
_, err := Parse([]byte(tt + "\n"))
assert.Error(t, err, tt)
}
}
func TestParseNegativeTimestamps(t *testing.T) {
for _, tt := range []string{
"test foo=101 -1257894000000000000",
} {
metrics, err := Parse([]byte(tt + "\n"))
assert.NoError(t, err, tt)
assert.True(t, metrics[0].Time().Equal(time.Unix(0, -1257894000000000000)))
}
}
func TestParseMaxKeyLength(t *testing.T) {
key := ""
for {
if len(key) > MaxKeyLength {
break
}
key += "test"
}
_, err := Parse([]byte(key + " value=1\n"))
assert.Error(t, err)
}

metric/uint_support.go Normal file

@@ -0,0 +1,7 @@
// +build uint64
package metric
func init() {
EnableUintSupport()
}
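// Editorial note: with the build constraint above, this init only runs when
// compiling with `go build -tags uint64`; default builds never call
// EnableUintSupport, so unsigned fields are presumably handled via the
// signed-integer conversions seen elsewhere in this diff.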


@@ -13,6 +13,12 @@ type Output interface {
Write(metrics []Metric) error
}
type AggregatingOutput interface {
Add(in Metric)
Push() []Metric
Reset()
}
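// A minimal sketch of an AggregatingOutput implementation (hypothetical, not
// part of this changeset): metrics are folded into per-series state with Add,
// flushed as aggregates with Push, and the window is cleared with Reset. The
// lastValueOutput type is invented here for illustration.
type lastValueOutput struct {
byID map[uint64]Metric
}

func (o *lastValueOutput) Add(in Metric) {
// keep only the most recent metric seen for each series in this window
o.byID[in.HashID()] = in
}

func (o *lastValueOutput) Push() []Metric {
out := make([]Metric, 0, len(o.byID))
for _, m := range o.byID {
out = append(out, m)
}
return out
}

func (o *lastValueOutput) Reset() {
o.byID = make(map[uint64]Metric)
}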
type ServiceOutput interface {
// Connect to the Output
Connect() error


@@ -1,5 +1,7 @@
package all
import (
_ "github.com/influxdata/telegraf/plugins/aggregators/basicstats"
_ "github.com/influxdata/telegraf/plugins/aggregators/histogram"
_ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
)


@@ -0,0 +1,56 @@
# BasicStats Aggregator Plugin
The BasicStats aggregator plugin gives count, max, min, mean, sum, s2 (variance), and stdev for a set of values,
emitting the aggregate every `period` seconds.
### Configuration:
```toml
# Keep the aggregate basicstats of each metric passing through.
[[aggregators.basicstats]]
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## BasicStats Arguments:
## Configures which basic stats to push as fields
stats = ["count","min","max","mean","stdev","s2","sum"]
```
- stats
  - If not specified, then `count`, `min`, `max`, `mean`, `stdev`, and `s2` are aggregated and pushed as fields. `sum` is not aggregated by default to maintain backwards compatibility.
  - If set to an empty array, no stats are aggregated (see the example configuration after this list).
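For example, to emit only the minimum and maximum of each field, a configuration along these lines could be used (illustrative, not from the original README):

```toml
[[aggregators.basicstats]]
period = "30s"
drop_original = false
stats = ["min", "max"]
```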
### Measurements & Fields:
- measurement1
- field1_count
- field1_max
- field1_min
- field1_mean
- field1_sum
- field1_s2 (variance)
- field1_stdev (standard deviation)
### Tags:
No tags are applied by this aggregator.
### Example Output:
```
$ telegraf --config telegraf.conf --quiet
system,host=tars load1=1 1475583980000000000
system,host=tars load1=1 1475583990000000000
system,host=tars load1_count=2,load1_max=1,load1_min=1,load1_mean=1,load1_sum=2,load1_s2=0,load1_stdev=0 1475584010000000000
system,host=tars load1=1 1475584020000000000
system,host=tars load1=3 1475584030000000000
system,host=tars load1_count=2,load1_max=3,load1_min=1,load1_mean=2,load1_sum=4,load1_s2=2,load1_stdev=1.414162 1475584010000000000
```

View File

@@ -0,0 +1,258 @@
package basicstats
import (
"log"
"math"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
type BasicStats struct {
Stats []string `toml:"stats"`
cache map[uint64]aggregate
statsConfig *configuredStats
}
type configuredStats struct {
count bool
min bool
max bool
mean bool
variance bool
stdev bool
sum bool
}
func NewBasicStats() *BasicStats {
mm := &BasicStats{}
mm.Reset()
return mm
}
type aggregate struct {
fields map[string]basicstats
name string
tags map[string]string
}
type basicstats struct {
count float64
min float64
max float64
sum float64
mean float64
M2 float64 //intermediate value for variance/stdev
}
var sampleConfig = `
## General Aggregator Arguments:
## The period on which to flush & clear the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
`
func (m *BasicStats) SampleConfig() string {
return sampleConfig
}
func (m *BasicStats) Description() string {
return "Keep the aggregate basicstats of each metric passing through."
}
func (m *BasicStats) Add(in telegraf.Metric) {
id := in.HashID()
if _, ok := m.cache[id]; !ok {
// hit an uncached metric, create its caches for the first time:
a := aggregate{
name: in.Name(),
tags: in.Tags(),
fields: make(map[string]basicstats),
}
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
a.fields[k] = basicstats{
count: 1,
min: fv,
max: fv,
mean: fv,
sum: fv,
M2: 0.0,
}
}
}
m.cache[id] = a
} else {
for k, v := range in.Fields() {
if fv, ok := convert(v); ok {
if _, ok := m.cache[id].fields[k]; !ok {
// hit an uncached field of a cached metric
m.cache[id].fields[k] = basicstats{
count: 1,
min: fv,
max: fv,
mean: fv,
sum: fv,
M2: 0.0,
}
continue
}
tmp := m.cache[id].fields[k]
//https://en.m.wikipedia.org/wiki/Algorithms_for_calculating_variance
//variable initialization
x := fv
mean := tmp.mean
M2 := tmp.M2
//counter compute
n := tmp.count + 1
tmp.count = n
//mean compute
delta := x - mean
mean = mean + delta/n
tmp.mean = mean
//variance/stdev compute
M2 = M2 + delta*(x-mean)
tmp.M2 = M2
//max/min compute
if fv < tmp.min {
tmp.min = fv
} else if fv > tmp.max {
tmp.max = fv
}
//sum compute
tmp.sum += fv
//store final data
m.cache[id].fields[k] = tmp
}
}
}
}
func (m *BasicStats) Push(acc telegraf.Accumulator) {
config := getConfiguredStats(m)
for _, aggregate := range m.cache {
fields := map[string]interface{}{}
for k, v := range aggregate.fields {
if config.count {
fields[k+"_count"] = v.count
}
if config.min {
fields[k+"_min"] = v.min
}
if config.max {
fields[k+"_max"] = v.max
}
if config.mean {
fields[k+"_mean"] = v.mean
}
if config.sum {
fields[k+"_sum"] = v.sum
}
//v.count always >=1
if v.count > 1 {
variance := v.M2 / (v.count - 1)
if config.variance {
fields[k+"_s2"] = variance
}
if config.stdev {
fields[k+"_stdev"] = math.Sqrt(variance)
}
}
//if count == 1 the sample variance is undefined (division by zero), so s2/stdev are not emitted
}
if len(fields) > 0 {
acc.AddFields(aggregate.name, fields, aggregate.tags)
}
}
}
func parseStats(names []string) *configuredStats {
parsed := &configuredStats{}
for _, name := range names {
switch name {
case "count":
parsed.count = true
case "min":
parsed.min = true
case "max":
parsed.max = true
case "mean":
parsed.mean = true
case "s2":
parsed.variance = true
case "stdev":
parsed.stdev = true
case "sum":
parsed.sum = true
default:
log.Printf("W! Unrecognized basic stat '%s', ignoring", name)
}
}
return parsed
}
func defaultStats() *configuredStats {
defaults := &configuredStats{}
defaults.count = true
defaults.min = true
defaults.max = true
defaults.mean = true
defaults.variance = true
defaults.stdev = true
defaults.sum = false
return defaults
}
func getConfiguredStats(m *BasicStats) *configuredStats {
if m.statsConfig == nil {
if m.Stats == nil {
m.statsConfig = defaultStats()
} else {
m.statsConfig = parseStats(m.Stats)
}
}
return m.statsConfig
}
func (m *BasicStats) Reset() {
m.cache = make(map[uint64]aggregate)
}
func convert(in interface{}) (float64, bool) {
switch v := in.(type) {
case float64:
return v, true
case int64:
return float64(v), true
default:
return 0, false
}
}
func init() {
aggregators.Add("basicstats", func() telegraf.Aggregator {
return NewBasicStats()
})
}

View File

@@ -0,0 +1,511 @@
package basicstats
import (
"math"
"testing"
"time"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
var m1, _ = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
"b": int64(1),
"c": float64(2),
"d": float64(2),
},
time.Now(),
)
var m2, _ = metric.New("m1",
map[string]string{"foo": "bar"},
map[string]interface{}{
"a": int64(1),
"b": int64(3),
"c": float64(4),
"d": float64(6),
"e": float64(200),
"ignoreme": "string",
"andme": true,
},
time.Now(),
)
func BenchmarkApply(b *testing.B) {
minmax := NewBasicStats()
for n := 0; n < b.N; n++ {
minmax.Add(m1)
minmax.Add(m2)
}
}
// Test two metrics getting added.
func TestBasicStatsWithPeriod(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewBasicStats()
minmax.Add(m1)
minmax.Add(m2)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_count": float64(2), //a
"a_max": float64(1),
"a_min": float64(1),
"a_mean": float64(1),
"a_stdev": float64(0),
"a_s2": float64(0),
"b_count": float64(2), //b
"b_max": float64(3),
"b_min": float64(1),
"b_mean": float64(2),
"b_s2": float64(2),
"b_stdev": math.Sqrt(2),
"c_count": float64(2), //c
"c_max": float64(4),
"c_min": float64(2),
"c_mean": float64(3),
"c_s2": float64(2),
"c_stdev": math.Sqrt(2),
"d_count": float64(2), //d
"d_max": float64(6),
"d_min": float64(2),
"d_mean": float64(4),
"d_s2": float64(8),
"d_stdev": math.Sqrt(8),
"e_count": float64(1), //e
"e_max": float64(200),
"e_min": float64(200),
"e_mean": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test two metrics getting added with a push/reset in between (simulates
// getting added in different periods.)
func TestBasicStatsDifferentPeriods(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewBasicStats()
minmax.Add(m1)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_count": float64(1), //a
"a_max": float64(1),
"a_min": float64(1),
"a_mean": float64(1),
"b_count": float64(1), //b
"b_max": float64(1),
"b_min": float64(1),
"b_mean": float64(1),
"c_count": float64(1), //c
"c_max": float64(2),
"c_min": float64(2),
"c_mean": float64(2),
"d_count": float64(1), //d
"d_max": float64(2),
"d_min": float64(2),
"d_mean": float64(2),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
acc.ClearMetrics()
minmax.Reset()
minmax.Add(m2)
minmax.Push(&acc)
expectedFields = map[string]interface{}{
"a_count": float64(1), //a
"a_max": float64(1),
"a_min": float64(1),
"a_mean": float64(1),
"b_count": float64(1), //b
"b_max": float64(3),
"b_min": float64(3),
"b_mean": float64(3),
"c_count": float64(1), //c
"c_max": float64(4),
"c_min": float64(4),
"c_mean": float64(4),
"d_count": float64(1), //d
"d_max": float64(6),
"d_min": float64(6),
"d_mean": float64(6),
"e_count": float64(1), //e
"e_max": float64(200),
"e_min": float64(200),
"e_mean": float64(200),
}
expectedTags = map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating count
func TestBasicStatsWithOnlyCount(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"count"}
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_count": float64(2),
"b_count": float64(2),
"c_count": float64(2),
"d_count": float64(2),
"e_count": float64(1),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating minimum
func TestBasicStatsWithOnlyMin(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"min"}
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_min": float64(1),
"b_min": float64(1),
"c_min": float64(2),
"d_min": float64(2),
"e_min": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating maximum
func TestBasicStatsWithOnlyMax(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"max"}
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_max": float64(1),
"b_max": float64(3),
"c_max": float64(4),
"d_max": float64(6),
"e_max": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating mean
func TestBasicStatsWithOnlyMean(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"mean"}
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_mean": float64(1),
"b_mean": float64(2),
"c_mean": float64(3),
"d_mean": float64(4),
"e_mean": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating sum
func TestBasicStatsWithOnlySum(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"sum"}
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_sum": float64(2),
"b_sum": float64(4),
"c_sum": float64(6),
"d_sum": float64(8),
"e_sum": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Verify that sum doesn't suffer from floating point errors. Early
// implementations of sum were calculated from mean and count, which
// e.g. summed "1, 1, 5, 1" as "7.999999..." instead of 8.
func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) {
var sum1, _ = metric.New("m1",
map[string]string{},
map[string]interface{}{
"a": int64(1),
},
time.Now(),
)
var sum2, _ = metric.New("m1",
map[string]string{},
map[string]interface{}{
"a": int64(1),
},
time.Now(),
)
var sum3, _ = metric.New("m1",
map[string]string{},
map[string]interface{}{
"a": int64(5),
},
time.Now(),
)
var sum4, _ = metric.New("m1",
map[string]string{},
map[string]interface{}{
"a": int64(1),
},
time.Now(),
)
aggregator := NewBasicStats()
aggregator.Stats = []string{"sum"}
aggregator.Add(sum1)
aggregator.Add(sum2)
aggregator.Add(sum3)
aggregator.Add(sum4)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_sum": float64(8),
}
expectedTags := map[string]string{}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating variance
func TestBasicStatsWithOnlyVariance(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"s2"}
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_s2": float64(0),
"b_s2": float64(2),
"c_s2": float64(2),
"d_s2": float64(8),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating standard deviation
func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"stdev"}
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_stdev": float64(0),
"b_stdev": math.Sqrt(2),
"c_stdev": math.Sqrt(2),
"d_stdev": math.Sqrt(8),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test only aggregating minimum and maximum
func TestBasicStatsWithMinAndMax(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"min", "max"}
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
expectedFields := map[string]interface{}{
"a_max": float64(1), //a
"a_min": float64(1),
"b_max": float64(3), //b
"b_min": float64(1),
"c_max": float64(4), //c
"c_min": float64(2),
"d_max": float64(6), //d
"d_min": float64(2),
"e_max": float64(200), //e
"e_min": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test aggregating with all stats
func TestBasicStatsWithAllStats(t *testing.T) {
acc := testutil.Accumulator{}
minmax := NewBasicStats()
minmax.Stats = []string{"count", "min", "max", "mean", "stdev", "s2", "sum"}
minmax.Add(m1)
minmax.Add(m2)
minmax.Push(&acc)
expectedFields := map[string]interface{}{
"a_count": float64(2), //a
"a_max": float64(1),
"a_min": float64(1),
"a_mean": float64(1),
"a_stdev": float64(0),
"a_s2": float64(0),
"a_sum": float64(2),
"b_count": float64(2), //b
"b_max": float64(3),
"b_min": float64(1),
"b_mean": float64(2),
"b_s2": float64(2),
"b_sum": float64(4),
"b_stdev": math.Sqrt(2),
"c_count": float64(2), //c
"c_max": float64(4),
"c_min": float64(2),
"c_mean": float64(3),
"c_s2": float64(2),
"c_stdev": math.Sqrt(2),
"c_sum": float64(6),
"d_count": float64(2), //d
"d_max": float64(6),
"d_min": float64(2),
"d_mean": float64(4),
"d_s2": float64(8),
"d_stdev": math.Sqrt(8),
"d_sum": float64(8),
"e_count": float64(1), //e
"e_max": float64(200),
"e_min": float64(200),
"e_mean": float64(200),
"e_sum": float64(200),
}
expectedTags := map[string]string{
"foo": "bar",
}
acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags)
}
// Test that if an empty array is passed, no points are pushed
func TestBasicStatsWithNoStats(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{}
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
acc.AssertDoesNotContainMeasurement(t, "m1")
}
// Test that if an unknown stat is configured, it doesn't explode
func TestBasicStatsWithUnknownStat(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Stats = []string{"crazy"}
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
acc.AssertDoesNotContainMeasurement(t, "m1")
}
// Test that if Stats isn't supplied, then we only do count, min, max, mean,
// stdev, and s2. We purposely exclude sum for backwards compatibility;
// otherwise users' working systems would suddenly (and surprisingly) start
// capturing sum without any change on their part.
func TestBasicStatsWithDefaultStats(t *testing.T) {
aggregator := NewBasicStats()
aggregator.Add(m1)
aggregator.Add(m2)
acc := testutil.Accumulator{}
aggregator.Push(&acc)
assert.True(t, acc.HasField("m1", "a_count"))
assert.True(t, acc.HasField("m1", "a_min"))
assert.True(t, acc.HasField("m1", "a_max"))
assert.True(t, acc.HasField("m1", "a_mean"))
assert.True(t, acc.HasField("m1", "a_stdev"))
assert.True(t, acc.HasField("m1", "a_s2"))
assert.False(t, acc.HasField("m1", "a_sum"))
}

View File

@@ -0,0 +1,97 @@
# Histogram Aggregator Plugin
The histogram aggregator plugin creates histograms containing the counts of
field values within a range.
Values added to a bucket are also added to the larger buckets in the
distribution. This creates a [cumulative histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg).
Like other Telegraf aggregators, the metric is emitted every `period` seconds.
Bucket counts, however, are not reset between periods and will be non-strictly
increasing while Telegraf is running.
#### Design
Each metric is passed to the aggregator, which looks up the histogram buckets
configured for the metric's fields. If buckets are found, the count for the
bucket matching the value is incremented by one; values above the largest
boundary are counted in the `+Inf` bucket. Every `period` seconds this data is
forwarded to the outputs.
The bucket-counting algorithm is based on the one implemented in the Prometheus
[client](https://github.com/prometheus/client_golang/blob/master/prometheus/histogram.go);
a rough sketch of the scheme is shown below.
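As a hypothetical illustration of this counting scheme (not code from this change): each observed value lands in exactly one raw bucket found by binary search, and the cumulative counts per `le` boundary are produced by summing the raw buckets up to it:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	buckets := []float64{0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0}
	// counts has one extra slot at the end for the +Inf bucket.
	counts := make([]int64, len(buckets)+1)

	for _, v := range []float64{12.0, 40.0, 250.0} {
		// SearchFloat64s returns the first index with buckets[i] >= v,
		// i.e. the bucket whose inclusive upper bound admits v; values
		// above the largest boundary land in the trailing +Inf slot.
		counts[sort.SearchFloat64s(buckets, v)]++
	}

	// Emit cumulative counts, one per "le" boundary.
	var cum int64
	for i, b := range buckets {
		cum += counts[i]
		fmt.Printf("le=%v count=%d\n", b, cum)
	}
	cum += counts[len(counts)-1]
	fmt.Printf("le=+Inf count=%d\n", cum)
}
```

Storing raw per-bucket counts and cumulating only at emit time keeps each observation O(log n) in the number of buckets.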
### Configuration
```toml
# Configuration for aggregate histogram metrics
[[aggregators.histogram]]
## The period in which to flush the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## Example config that aggregates all fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of metric.
# measurement_name = "cpu"
## Example config that aggregates only specific fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of metric.
# measurement_name = "diskio"
# ## The concrete fields of metric
# fields = ["io_time", "read_time", "write_time"]
```
The user is responsible for defining the bounds of the histogram bucket as
well as the measurement name and fields to aggregate.
Each histogram config section must contain a `buckets` and `measurement_name`
option. Optionally, if `fields` is set only the fields listed will be
aggregated. If `fields` is not set all fields are aggregated.
The `buckets` option contains a list of floats which specify the bucket
boundaries. Each float value defines the inclusive upper bound of the bucket.
The `+Inf` bucket is added automatically and does not need to be defined.
### Measurements & Fields:
The suffix `_bucket` will be added to each field key.
- measurement1
- field1_bucket
- field2_bucket
### Tags:
All measurements are given the tag `le`, which holds the inclusive upper bound
of the bucket: the metric value is less than or equal to the value of this tag.
For example, assume a metric value of 10 and the buckets [5, 10, 30, 70, 100].
The tag `le` will then have the value 10, because the metric value is placed
into the bucket whose upper bound is `10`.
### Example Output:
```
cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=0i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=20.0 usage_idle_bucket=1i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=30.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=40.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=50.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=60.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=70.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=80.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=90.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=100.0 usage_idle_bucket=2i 1486998330000000000
cpu,cpu=cpu1,host=localhost,le=+Inf usage_idle_bucket=2i 1486998330000000000
```

View File

@@ -0,0 +1,315 @@
package histogram
import (
"sort"
"strconv"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/aggregators"
)
// bucketTag is the tag that holds the bucket's inclusive upper bound
const bucketTag = "le"
// bucketInf is the bucket bound used for values above the largest configured bucket
const bucketInf = "+Inf"
// HistogramAggregator is an aggregator with histogram configs and the accumulated histograms for the configured metrics
type HistogramAggregator struct {
Configs []config `toml:"config"`
buckets bucketsByMetrics
cache map[uint64]metricHistogramCollection
}
// config holds the metric name, the fields of the metric, and the histogram buckets.
type config struct {
Metric string `toml:"measurement_name"`
Fields []string `toml:"fields"`
Buckets buckets `toml:"buckets"`
}
// bucketsByMetrics contains the buckets grouped by metric and field name
type bucketsByMetrics map[string]bucketsByFields
// bucketsByFields contains the buckets grouped by field name
type bucketsByFields map[string]buckets
// buckets contains the upper bounds of the buckets
type buckets []float64
// metricHistogramCollection aggregates the histogram data
type metricHistogramCollection struct {
histogramCollection map[string]counts
name string
tags map[string]string
}
// counts holds the number of hits per bucket
type counts []int64
// groupedByCountFields groups the fields and their count values by metric name and tags
type groupedByCountFields struct {
name string
tags map[string]string
fieldsWithCount map[string]int64
}
// NewHistogramAggregator creates new histogram aggregator
func NewHistogramAggregator() telegraf.Aggregator {
h := &HistogramAggregator{}
h.buckets = make(bucketsByMetrics)
h.resetCache()
return h
}
var sampleConfig = `
## The period in which to flush the aggregator.
period = "30s"
## If true, the original metric will be dropped by the
## aggregator and will not get sent to the output plugins.
drop_original = false
## Example config that aggregates all fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
# ## The name of metric.
# measurement_name = "cpu"
## Example config that aggregates only specific fields of the metric.
# [[aggregators.histogram.config]]
# ## The set of buckets.
# buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
# ## The name of metric.
# measurement_name = "diskio"
# ## The concrete fields of metric
# fields = ["io_time", "read_time", "write_time"]
`
// SampleConfig returns sample of config
func (h *HistogramAggregator) SampleConfig() string {
return sampleConfig
}
// Description returns description of aggregator plugin
func (h *HistogramAggregator) Description() string {
return "Create aggregate histograms."
}
// Add adds new hit to the buckets
func (h *HistogramAggregator) Add(in telegraf.Metric) {
bucketsByField := make(map[string][]float64)
for field := range in.Fields() {
buckets := h.getBuckets(in.Name(), field)
if buckets != nil {
bucketsByField[field] = buckets
}
}
if len(bucketsByField) == 0 {
return
}
id := in.HashID()
agr, ok := h.cache[id]
if !ok {
agr = metricHistogramCollection{
name: in.Name(),
tags: in.Tags(),
histogramCollection: make(map[string]counts),
}
}
for field, value := range in.Fields() {
if buckets, ok := bucketsByField[field]; ok {
if agr.histogramCollection[field] == nil {
agr.histogramCollection[field] = make(counts, len(buckets)+1)
}
if value, ok := convert(value); ok {
index := sort.SearchFloat64s(buckets, value)
agr.histogramCollection[field][index]++
}
}
}
h.cache[id] = agr
}
// Push returns histogram values for metrics
func (h *HistogramAggregator) Push(acc telegraf.Accumulator) {
metricsWithGroupedFields := []groupedByCountFields{}
for _, aggregate := range h.cache {
for field, counts := range aggregate.histogramCollection {
h.groupFieldsByBuckets(&metricsWithGroupedFields, aggregate.name, field, copyTags(aggregate.tags), counts)
}
}
for _, metric := range metricsWithGroupedFields {
acc.AddFields(metric.name, makeFieldsWithCount(metric.fieldsWithCount), metric.tags)
}
}
// groupFieldsByBuckets groups fields by metric buckets which are represented as tags
func (h *HistogramAggregator) groupFieldsByBuckets(
metricsWithGroupedFields *[]groupedByCountFields,
name string,
field string,
tags map[string]string,
counts []int64,
) {
count := int64(0)
for index, bucket := range h.getBuckets(name, field) {
count += counts[index]
tags[bucketTag] = strconv.FormatFloat(bucket, 'f', -1, 64)
h.groupField(metricsWithGroupedFields, name, field, count, copyTags(tags))
}
count += counts[len(counts)-1]
tags[bucketTag] = bucketInf
h.groupField(metricsWithGroupedFields, name, field, count, tags)
}
// groupField groups field by count value
func (h *HistogramAggregator) groupField(
metricsWithGroupedFields *[]groupedByCountFields,
name string,
field string,
count int64,
tags map[string]string,
) {
for key, metric := range *metricsWithGroupedFields {
if name == metric.name && isTagsIdentical(tags, metric.tags) {
(*metricsWithGroupedFields)[key].fieldsWithCount[field] = count
return
}
}
fieldsWithCount := map[string]int64{
field: count,
}
*metricsWithGroupedFields = append(
*metricsWithGroupedFields,
groupedByCountFields{name: name, tags: tags, fieldsWithCount: fieldsWithCount},
)
}
// Reset does nothing, because counts need to accumulate over a long time; if they were cleared every
// period, each histogram would cover only a small slice of the distribution.
func (h *HistogramAggregator) Reset() {}
// resetCache resets the cached counts (hits) in the buckets
func (h *HistogramAggregator) resetCache() {
h.cache = make(map[uint64]metricHistogramCollection)
}
// getBuckets finds buckets and returns them
func (h *HistogramAggregator) getBuckets(metric string, field string) []float64 {
if buckets, ok := h.buckets[metric][field]; ok {
return buckets
}
for _, config := range h.Configs {
if config.Metric == metric {
if !isBucketExists(field, config) {
continue
}
if _, ok := h.buckets[metric]; !ok {
h.buckets[metric] = make(bucketsByFields)
}
h.buckets[metric][field] = sortBuckets(config.Buckets)
}
}
return h.buckets[metric][field]
}
// isBucketExists checks whether buckets exist for the passed field
func isBucketExists(field string, cfg config) bool {
if len(cfg.Fields) == 0 {
return true
}
for _, fl := range cfg.Fields {
if fl == field {
return true
}
}
return false
}
// sortBuckets sorts the buckets if needed
func sortBuckets(buckets []float64) []float64 {
for i, bucket := range buckets {
if i < len(buckets)-1 && bucket >= buckets[i+1] {
sort.Float64s(buckets)
break
}
}
return buckets
}
// convert converts interface to concrete type
func convert(in interface{}) (float64, bool) {
switch v := in.(type) {
case float64:
return v, true
case int64:
return float64(v), true
default:
return 0, false
}
}
// copyTags copies tags
func copyTags(tags map[string]string) map[string]string {
copiedTags := map[string]string{}
for key, val := range tags {
copiedTags[key] = val
}
return copiedTags
}
// isTagsIdentical checks whether two tag maps are identical
func isTagsIdentical(originalTags, checkedTags map[string]string) bool {
if len(originalTags) != len(checkedTags) {
return false
}
for tagName, tagValue := range originalTags {
if tagValue != checkedTags[tagName] {
return false
}
}
return true
}
// makeFieldsWithCount suffixes each field name with _bucket and assigns its count value
func makeFieldsWithCount(fieldsWithCountIn map[string]int64) map[string]interface{} {
fieldsWithCountOut := map[string]interface{}{}
for field, count := range fieldsWithCountIn {
fieldsWithCountOut[field+"_bucket"] = count
}
return fieldsWithCountOut
}
// init initializes histogram aggregator plugin
func init() {
aggregators.Add("histogram", func() telegraf.Aggregator {
return NewHistogramAggregator()
})
}

View File

@@ -0,0 +1,210 @@
package histogram
import (
"fmt"
"testing"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/metric"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
// NewTestHistogram creates new test histogram aggregation with specified config
func NewTestHistogram(cfg []config) telegraf.Aggregator {
htm := &HistogramAggregator{Configs: cfg}
htm.buckets = make(bucketsByMetrics)
htm.resetCache()
return htm
}
// firstMetric1 is the first test metric
var firstMetric1, _ = metric.New(
"first_metric_name",
map[string]string{"tag_name": "tag_value"},
map[string]interface{}{
"a": float64(15.3),
"b": float64(40),
},
time.Now(),
)
// firstMetric2 is the first test metric with different values
var firstMetric2, _ = metric.New(
"first_metric_name",
map[string]string{"tag_name": "tag_value"},
map[string]interface{}{
"a": float64(15.9),
"c": float64(40),
},
time.Now(),
)
// secondMetric is the second metric
var secondMetric, _ = metric.New(
"second_metric_name",
map[string]string{"tag_name": "tag_value"},
map[string]interface{}{
"a": float64(105),
"ignoreme": "string",
"andme": true,
},
time.Now(),
)
// BenchmarkApply runs benchmarks
func BenchmarkApply(b *testing.B) {
histogram := NewHistogramAggregator()
for n := 0; n < b.N; n++ {
histogram.Add(firstMetric1)
histogram.Add(firstMetric2)
histogram.Add(secondMetric)
}
}
// TestHistogramWithPeriodAndOneField tests metrics for one period and for one field
func TestHistogramWithPeriodAndOneField(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg)
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
histogram.Add(firstMetric2)
histogram.Push(acc)
if len(acc.Metrics) != 6 {
assert.Fail(t, "Incorrect number of metrics")
}
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "0")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "10")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "20")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "30")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "40")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, bucketInf)
}
// TestHistogramWithPeriodAndAllFields tests two metrics for one period and for all fields
func TestHistogramWithPeriodAndAllFields(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 20.0, 30.0, 40.0}})
cfg = append(cfg, config{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}})
histogram := NewTestHistogram(cfg)
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
histogram.Add(firstMetric2)
histogram.Add(secondMetric)
histogram.Push(acc)
if len(acc.Metrics) != 12 {
assert.Fail(t, "Incorrect number of metrics")
}
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "0")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, "15.5")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "20")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "30")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, "40")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, bucketInf)
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "0")
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "4")
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "10")
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "23")
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "30")
assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(1), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, bucketInf)
}
// TestHistogramDifferentPeriodsAndAllFields tests two metrics getting added with a push/reset in between (simulates
// getting added in different periods) for all fields
func TestHistogramDifferentPeriodsAndAllFields(t *testing.T) {
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg)
acc := &testutil.Accumulator{}
histogram.Add(firstMetric1)
histogram.Push(acc)
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0)}, "0")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0)}, "10")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0)}, "20")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0)}, "30")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(1)}, "40")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(1)}, bucketInf)
acc.ClearMetrics()
histogram.Add(firstMetric2)
histogram.Push(acc)
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "0")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "10")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "20")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "30")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, "40")
assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, bucketInf)
}
// TestWrongBucketsOrder tests that buckets in the wrong order trigger a panic
func TestWrongBucketsOrder(t *testing.T) {
defer func() {
if r := recover(); r != nil {
assert.Equal(
t,
"histogram buckets must be in increasing order: 90.00 >= 20.00, metrics: first_metric_name, field: a",
fmt.Sprint(r),
)
}
}()
var cfg []config
cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 90.0, 20.0, 30.0, 40.0}})
histogram := NewTestHistogram(cfg)
histogram.Add(firstMetric2)
}
// assertContainsTaggedField is a helper function for testing histogram data
func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricName string, fields map[string]interface{}, le string) {
acc.Lock()
defer acc.Unlock()
for _, checkedMetric := range acc.Metrics {
// check metric name
if checkedMetric.Measurement != metricName {
continue
}
// check "le" tag
if checkedMetric.Tags[bucketTag] != le {
continue
}
// check fields
isFieldsIdentical := true
for field := range fields {
if _, ok := checkedMetric.Fields[field]; !ok {
isFieldsIdentical = false
break
}
}
if !isFieldsIdentical {
continue
}
// check fields with their counts
if assert.Equal(t, fields, checkedMetric.Fields) {
return
}
assert.Fail(t, fmt.Sprintf("incorrect fields %v of metric %s", fields, metricName))
}
assert.Fail(t, fmt.Sprintf("unknown measurement '%s' with tags: %v, fields: %v", metricName, map[string]string{"le": le}, fields))
}

View File

@@ -1,45 +1,61 @@
# Example Input Plugin
The example plugin gathers metrics about example things
The example plugin gathers metrics about example things. This description
explains at a high level what the plugin does and provides links to where
additional information can be found.
### Configuration:
This section contains the default TOML to configure the plugin. You can
generate it using `telegraf --usage <plugin-name>`.
```toml
# Description
[[inputs.example]]
# SampleConfig
example_option = "example_value"
```
### Measurements & Fields:
### Metrics:
<optional description>
Here you should add an optional description and links to where the user can
get more information about the measurements.
If the output is determined dynamically based on the input source, or there
are more metrics than can reasonably be listed, describe how the input is
mapped to the output.
- measurement1
- field1 (type, unit)
- field2 (float, percent)
- measurement2
- field3 (integer, bytes)
### Tags:
- All measurements have the following tags:
- tags:
- tag1 (optional description)
- tag2
- measurement2 has the following tags:
- fields:
- field1 (type, unit)
- field2 (float, percent)
- measurement2
- tags:
- tag3
- fields:
- field3 (integer, bytes)
### Sample Queries:
These are some useful queries (to generate dashboards or other) to run against data from this plugin:
This section should contain some useful InfluxDB queries that can be used to
get started with the plugin or to generate dashboards. For each query listed,
describe at a high level what data is returned.
Get the max, mean, and min for the measurement in the last hour:
```
SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar AND time > now() - 1h GROUP BY tag
```
### Example Output:
This section shows example output in Line Protocol format. You can often use
`telegraf --input-filter <plugin-name> --test` or use the `file` output to get
this information.
```
$ ./telegraf -config telegraf.conf -input-filter example -test
measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455
measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455
```

File diff suppressed because one or more lines are too long

View File

@@ -10,7 +10,6 @@ import (
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/errchan"
"github.com/influxdata/telegraf/plugins/inputs"
as "github.com/aerospike/aerospike-client-go"
@@ -41,17 +40,16 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
}
var wg sync.WaitGroup
errChan := errchan.New(len(a.Servers))
wg.Add(len(a.Servers))
for _, server := range a.Servers {
go func(serv string) {
defer wg.Done()
errChan.C <- a.gatherServer(serv, acc)
acc.AddError(a.gatherServer(serv, acc))
}(server)
}
wg.Wait()
return errChan.Error()
return nil
}
func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) error {
@@ -75,10 +73,9 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
for _, n := range nodes {
tags := map[string]string{
"aerospike_host": hostport,
"node_name": n.GetName(),
}
fields := map[string]interface{}{
"node_name": n.GetName(),
}
fields := make(map[string]interface{})
stats, err := as.RequestNodeStats(n)
if err != nil {
return err
@@ -88,7 +85,7 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
if err == nil {
fields[strings.Replace(k, "-", "_", -1)] = val
} else {
log.Printf("I! skipping aerospike field %v with int64 overflow", k)
log.Printf("I! skipping aerospike field %v with int64 overflow: %q", k, v)
}
}
acc.AddFields("aerospike_node", fields, tags, time.Now())
@@ -102,11 +99,10 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
for _, namespace := range namespaces {
nTags := map[string]string{
"aerospike_host": hostport,
"node_name": n.GetName(),
}
nTags["namespace"] = namespace
nFields := map[string]interface{}{
"node_name": n.GetName(),
}
nFields := make(map[string]interface{})
info, err := as.RequestNodeInfo(n, "namespace/"+namespace)
if err != nil {
continue
@@ -121,7 +117,7 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro
if err == nil {
nFields[strings.Replace(parts[0], "-", "_", -1)] = val
} else {
log.Printf("I! skipping aerospike field %v with int64 overflow", parts[0])
log.Printf("I! skipping aerospike field %v with int64 overflow: %q", parts[0], parts[1])
}
}
acc.AddFields("aerospike_namespace", nFields, nTags, time.Now())

View File

@@ -19,12 +19,14 @@ func TestAerospikeStatistics(t *testing.T) {
var acc testutil.Accumulator
err := a.Gather(&acc)
err := acc.GatherError(a.Gather)
require.NoError(t, err)
assert.True(t, acc.HasMeasurement("aerospike_node"))
assert.True(t, acc.HasTag("aerospike_node", "node_name"))
assert.True(t, acc.HasMeasurement("aerospike_namespace"))
assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
assert.True(t, acc.HasTag("aerospike_namespace", "node_name"))
assert.True(t, acc.HasInt64Field("aerospike_node", "batch_error"))
}
func TestAerospikeStatisticsPartialErr(t *testing.T) {
@@ -41,12 +43,11 @@ func TestAerospikeStatisticsPartialErr(t *testing.T) {
var acc testutil.Accumulator
err := a.Gather(&acc)
require.Error(t, err)
require.Error(t, acc.GatherError(a.Gather))
assert.True(t, acc.HasMeasurement("aerospike_node"))
assert.True(t, acc.HasMeasurement("aerospike_namespace"))
assert.True(t, acc.HasIntField("aerospike_node", "batch_error"))
assert.True(t, acc.HasInt64Field("aerospike_node", "batch_error"))
}
func TestAerospikeParseValue(t *testing.T) {

View File

@@ -2,8 +2,10 @@ package all
import (
_ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
_ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/apache"
_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
_ "github.com/influxdata/telegraf/plugins/inputs/bond"
_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
_ "github.com/influxdata/telegraf/plugins/inputs/ceph"
_ "github.com/influxdata/telegraf/plugins/inputs/cgroup"
@@ -13,45 +15,65 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/consul"
_ "github.com/influxdata/telegraf/plugins/inputs/couchbase"
_ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
_ "github.com/influxdata/telegraf/plugins/inputs/dcos"
_ "github.com/influxdata/telegraf/plugins/inputs/disque"
_ "github.com/influxdata/telegraf/plugins/inputs/dmcache"
_ "github.com/influxdata/telegraf/plugins/inputs/dns_query"
_ "github.com/influxdata/telegraf/plugins/inputs/docker"
_ "github.com/influxdata/telegraf/plugins/inputs/dovecot"
_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
_ "github.com/influxdata/telegraf/plugins/inputs/exec"
_ "github.com/influxdata/telegraf/plugins/inputs/fail2ban"
_ "github.com/influxdata/telegraf/plugins/inputs/fibaro"
_ "github.com/influxdata/telegraf/plugins/inputs/filestat"
_ "github.com/influxdata/telegraf/plugins/inputs/fluentd"
_ "github.com/influxdata/telegraf/plugins/inputs/graylog"
_ "github.com/influxdata/telegraf/plugins/inputs/haproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/hddtemp"
_ "github.com/influxdata/telegraf/plugins/inputs/http"
_ "github.com/influxdata/telegraf/plugins/inputs/http_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/http_response"
_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
_ "github.com/influxdata/telegraf/plugins/inputs/internal"
_ "github.com/influxdata/telegraf/plugins/inputs/interrupts"
_ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor"
_ "github.com/influxdata/telegraf/plugins/inputs/ipset"
_ "github.com/influxdata/telegraf/plugins/inputs/iptables"
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia"
_ "github.com/influxdata/telegraf/plugins/inputs/jolokia2"
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer_legacy"
_ "github.com/influxdata/telegraf/plugins/inputs/kapacitor"
_ "github.com/influxdata/telegraf/plugins/inputs/kubernetes"
_ "github.com/influxdata/telegraf/plugins/inputs/leofs"
_ "github.com/influxdata/telegraf/plugins/inputs/logparser"
_ "github.com/influxdata/telegraf/plugins/inputs/lustre2"
_ "github.com/influxdata/telegraf/plugins/inputs/mailchimp"
_ "github.com/influxdata/telegraf/plugins/inputs/mcrouter"
_ "github.com/influxdata/telegraf/plugins/inputs/memcached"
_ "github.com/influxdata/telegraf/plugins/inputs/mesos"
_ "github.com/influxdata/telegraf/plugins/inputs/minecraft"
_ "github.com/influxdata/telegraf/plugins/inputs/mongodb"
_ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/mysql"
_ "github.com/influxdata/telegraf/plugins/inputs/nats"
_ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/net_response"
_ "github.com/influxdata/telegraf/plugins/inputs/nginx"
_ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus"
_ "github.com/influxdata/telegraf/plugins/inputs/nsq"
_ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer"
_ "github.com/influxdata/telegraf/plugins/inputs/nstat"
_ "github.com/influxdata/telegraf/plugins/inputs/ntpq"
_ "github.com/influxdata/telegraf/plugins/inputs/nvidia_smi"
_ "github.com/influxdata/telegraf/plugins/inputs/openldap"
_ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd"
_ "github.com/influxdata/telegraf/plugins/inputs/passenger"
_ "github.com/influxdata/telegraf/plugins/inputs/pf"
_ "github.com/influxdata/telegraf/plugins/inputs/phpfpm"
_ "github.com/influxdata/telegraf/plugins/inputs/ping"
_ "github.com/influxdata/telegraf/plugins/inputs/postfix"
_ "github.com/influxdata/telegraf/plugins/inputs/postgresql"
_ "github.com/influxdata/telegraf/plugins/inputs/postgresql_extensible"
_ "github.com/influxdata/telegraf/plugins/inputs/powerdns"
@@ -63,21 +85,30 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/redis"
_ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb"
_ "github.com/influxdata/telegraf/plugins/inputs/riak"
_ "github.com/influxdata/telegraf/plugins/inputs/salesforce"
_ "github.com/influxdata/telegraf/plugins/inputs/sensors"
_ "github.com/influxdata/telegraf/plugins/inputs/smart"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp"
_ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy"
_ "github.com/influxdata/telegraf/plugins/inputs/socket_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/solr"
_ "github.com/influxdata/telegraf/plugins/inputs/sqlserver"
_ "github.com/influxdata/telegraf/plugins/inputs/statsd"
_ "github.com/influxdata/telegraf/plugins/inputs/sysstat"
_ "github.com/influxdata/telegraf/plugins/inputs/system"
_ "github.com/influxdata/telegraf/plugins/inputs/tail"
_ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/teamspeak"
_ "github.com/influxdata/telegraf/plugins/inputs/tomcat"
_ "github.com/influxdata/telegraf/plugins/inputs/trig"
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/unbound"
_ "github.com/influxdata/telegraf/plugins/inputs/varnish"
_ "github.com/influxdata/telegraf/plugins/inputs/webhooks"
_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"
_ "github.com/influxdata/telegraf/plugins/inputs/win_services"
_ "github.com/influxdata/telegraf/plugins/inputs/zfs"
_ "github.com/influxdata/telegraf/plugins/inputs/zipkin"
_ "github.com/influxdata/telegraf/plugins/inputs/zookeeper"
)

View File

@@ -0,0 +1,47 @@
# AMQP Consumer Input Plugin
This plugin provides a consumer for use with AMQP 0-9-1; a prominent implementation of this protocol is [RabbitMQ](https://www.rabbitmq.com/).
Metrics are read from a topic exchange using the configured queue and binding_key.
Message payload should be formatted in one of the [Telegraf Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).
For an introduction to AMQP see:
- https://www.rabbitmq.com/tutorials/amqp-concepts.html
- https://www.rabbitmq.com/getstarted.html
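Note (general AMQP 0-9-1 behavior, not specific to this change): in a topic exchange the binding key `#` matches every routing key, so the defaults below consume all messages published to the exchange; a narrower pattern such as `telegraf.#` would restrict what reaches the queue.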
The following defaults are known to work with RabbitMQ:
```toml
# AMQP consumer plugin
[[inputs.amqp_consumer]]
## AMQP url
url = "amqp://localhost:5672/influxdb"
## AMQP exchange
exchange = "telegraf"
## AMQP queue name
queue = "telegraf"
## Binding Key
binding_key = "#"
## Controls how many messages the server will try to keep on the network
## for consumers before receiving delivery acks.
#prefetch_count = 50
## Auth method. PLAIN and EXTERNAL are supported.
## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
## described here: https://www.rabbitmq.com/plugins.html
# auth_method = "PLAIN"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
```

View File

@@ -0,0 +1,282 @@
package amqp_consumer
import (
"fmt"
"log"
"strings"
"sync"
"time"
"github.com/streadway/amqp"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/parsers"
)
// AMQPConsumer is the top level struct for this plugin
type AMQPConsumer struct {
URL string
// AMQP exchange
Exchange string
// Queue Name
Queue string
// Binding Key
BindingKey string `toml:"binding_key"`
// Controls how many messages the server will try to keep on the network
// for consumers before receiving delivery acks.
PrefetchCount int
// AMQP Auth method
AuthMethod string
// Path to CA file
SSLCA string `toml:"ssl_ca"`
// Path to host cert file
SSLCert string `toml:"ssl_cert"`
// Path to cert key file
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
parser parsers.Parser
conn *amqp.Connection
wg *sync.WaitGroup
}
type externalAuth struct{}
func (a *externalAuth) Mechanism() string {
return "EXTERNAL"
}
func (a *externalAuth) Response() string {
return fmt.Sprintf("\000")
}
const (
DefaultAuthMethod = "PLAIN"
DefaultPrefetchCount = 50
)
func (a *AMQPConsumer) SampleConfig() string {
return `
## AMQP url
url = "amqp://localhost:5672/influxdb"
## AMQP exchange
exchange = "telegraf"
## AMQP queue name
queue = "telegraf"
## Binding Key
binding_key = "#"
## Maximum number of messages server should give to the worker.
prefetch_count = 50
## Auth method. PLAIN and EXTERNAL are supported
## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
## described here: https://www.rabbitmq.com/plugins.html
# auth_method = "PLAIN"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## Data format to consume.
## Each data format has its own unique set of configuration options, read
## more about them here:
## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
data_format = "influx"
`
}
func (a *AMQPConsumer) Description() string {
return "AMQP consumer plugin"
}
func (a *AMQPConsumer) SetParser(parser parsers.Parser) {
a.parser = parser
}
// All gathering is done in the Start function
func (a *AMQPConsumer) Gather(_ telegraf.Accumulator) error {
return nil
}
func (a *AMQPConsumer) createConfig() (*amqp.Config, error) {
// make new tls config
tls, err := internal.GetTLSConfig(
a.SSLCert, a.SSLKey, a.SSLCA, a.InsecureSkipVerify)
if err != nil {
return nil, err
}
// parse auth method
var sasl []amqp.Authentication // nil by default
if strings.ToUpper(a.AuthMethod) == "EXTERNAL" {
sasl = []amqp.Authentication{&externalAuth{}}
}
config := amqp.Config{
TLSClientConfig: tls,
SASL: sasl, // if nil, it will be PLAIN
}
return &config, nil
}
// Start satisfies the telegraf.ServiceInput interface
func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error {
amqpConf, err := a.createConfig()
if err != nil {
return err
}
msgs, err := a.connect(amqpConf)
if err != nil {
return err
}
a.wg = &sync.WaitGroup{}
a.wg.Add(1)
go a.process(msgs, acc)
go func() {
for {
err := <-a.conn.NotifyClose(make(chan *amqp.Error))
if err == nil {
break
}
log.Printf("I! AMQP consumer connection closed: %s; trying to reconnect", err)
for {
msgs, err := a.connect(amqpConf)
if err != nil {
log.Printf("E! AMQP connection failed: %s", err)
time.Sleep(10 * time.Second)
continue
}
a.wg.Add(1)
go a.process(msgs, acc)
break
}
}
}()
return nil
}
func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, error) {
conn, err := amqp.DialConfig(a.URL, *amqpConf)
if err != nil {
return nil, err
}
a.conn = conn
ch, err := conn.Channel()
if err != nil {
return nil, fmt.Errorf("Failed to open a channel: %s", err)
}
err = ch.ExchangeDeclare(
a.Exchange, // name
"topic", // type
true, // durable
false, // auto-deleted
false, // internal
false, // no-wait
nil, // arguments
)
if err != nil {
return nil, fmt.Errorf("Failed to declare an exchange: %s", err)
}
q, err := ch.QueueDeclare(
a.Queue, // queue
true, // durable
false, // delete when unused
false, // exclusive
false, // no-wait
nil, // arguments
)
if err != nil {
return nil, fmt.Errorf("Failed to declare a queue: %s", err)
}
err = ch.QueueBind(
q.Name, // queue
a.BindingKey, // binding-key
a.Exchange, // exchange
false,
nil,
)
if err != nil {
return nil, fmt.Errorf("Failed to bind a queue: %s", err)
}
err = ch.Qos(
a.PrefetchCount,
0, // prefetch-size
false, // global
)
if err != nil {
return nil, fmt.Errorf("Failed to set QoS: %s", err)
}
msgs, err := ch.Consume(
q.Name, // queue
"", // consumer
false, // auto-ack
false, // exclusive
false, // no-local
false, // no-wait
nil, // arguments
)
if err != nil {
return nil, fmt.Errorf("Failed establishing connection to queue: %s", err)
}
log.Println("I! Started AMQP consumer")
return msgs, err
}
// Read messages from queue and add them to the Accumulator
func (a *AMQPConsumer) process(msgs <-chan amqp.Delivery, acc telegraf.Accumulator) {
defer a.wg.Done()
for d := range msgs {
metrics, err := a.parser.Parse(d.Body)
if err != nil {
log.Printf("E! %v: error parsing metric - %v", err, string(d.Body))
} else {
for _, m := range metrics {
acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
}
}
d.Ack(false)
}
log.Printf("I! AMQP consumer queue closed")
}
func (a *AMQPConsumer) Stop() {
err := a.conn.Close()
if err != nil && err != amqp.ErrClosed {
log.Printf("E! Error closing AMQP connection: %s", err)
return
}
a.wg.Wait()
log.Println("I! Stopped AMQP service")
}
func init() {
inputs.Add("amqp_consumer", func() telegraf.Input {
return &AMQPConsumer{
AuthMethod: DefaultAuthMethod,
PrefetchCount: DefaultPrefetchCount,
}
})
}

View File

@@ -1,55 +1,84 @@
# Telegraf plugin: Apache
# Apache Input Plugin
#### Plugin arguments:
- **urls** []string: List of apache-status URLs to collect from. Default is "http://localhost/server-status?auto".
- **username** string: Username for HTTP basic authentication
- **password** string: Password for HTTP basic authentication
- **timeout** duration: time the HTTP connection will wait for a response. Default is 4 seconds ("4s")
The Apache plugin collects server performance information using the [`mod_status`](https://httpd.apache.org/docs/2.4/mod/mod_status.html) module of the [Apache HTTP Server](https://httpd.apache.org/).
##### Optional SSL Config
Typically, the `mod_status` module is configured to expose a page at the `/server-status?auto` location of the Apache server. The [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) option must be enabled in order to collect all available fields. For information about how to configure your server, see the [module documentation](https://httpd.apache.org/docs/2.4/mod/mod_status.html#enable).
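As a hedged illustration (an assumed `httpd.conf` snippet for Apache 2.4, not part of this change), exposing the status page with extended metrics typically looks like:

```
# Assumed example; adjust the access policy for your environment.
ExtendedStatus On
<Location "/server-status">
    SetHandler server-status
    Require local
</Location>
```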
- **ssl_ca** string: the full path for the SSL CA certificate
- **ssl_cert** string: the full path for the SSL certificate
- **ssl_key** string: the full path for the key file
- **insecure_skip_verify** bool: if true, the HTTP client will skip all SSL verification of peer and host. Defaults to false
### Configuration:
#### Description
```toml
# Read Apache status information (mod_status)
[[inputs.apache]]
## An array of URLs to gather from, must be directed at the machine
## readable version of the mod_status page including the auto query string.
## Default is "http://localhost/server-status?auto".
urls = ["http://localhost/server-status?auto"]
The Apache plugin collects from the /server-status?auto URL; see
[apache.org/server-status?auto](http://www.apache.org/server-status?auto) for an
example, and the
[mod_status documentation](http://httpd.apache.org/docs/2.2/mod/mod_status.html)
for details.
## Credentials for basic HTTP authentication.
# username = "myuser"
# password = "mypassword"
# Measurements:
## Maximum time to receive response.
# response_timeout = "5s"
Meta:
- tags: `port=<port>`, `server=url`
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
```
- apache_TotalAccesses
- apache_TotalkBytes
- apache_CPULoad
- apache_Uptime
- apache_ReqPerSec
- apache_BytesPerSec
- apache_BytesPerReq
- apache_BusyWorkers
- apache_IdleWorkers
- apache_ConnsTotal
- apache_ConnsAsyncWriting
- apache_ConnsAsyncKeepAlive
- apache_ConnsAsyncClosing
### Measurements & Fields:
### Scoreboard measurements
- apache
  - BusyWorkers (float)
  - BytesPerReq (float)
  - BytesPerSec (float)
  - ConnsAsyncClosing (float)
  - ConnsAsyncKeepAlive (float)
  - ConnsAsyncWriting (float)
  - ConnsTotal (float)
  - CPUChildrenSystem (float)
  - CPUChildrenUser (float)
  - CPULoad (float)
  - CPUSystem (float)
  - CPUUser (float)
  - IdleWorkers (float)
  - Load1 (float)
  - Load5 (float)
  - Load15 (float)
  - ParentServerConfigGeneration (float)
  - ParentServerMPMGeneration (float)
  - ReqPerSec (float)
  - ServerUptimeSeconds (float)
  - TotalAccesses (float)
  - TotalkBytes (float)
  - Uptime (float)
- apache_scboard_waiting
- apache_scboard_starting
- apache_scboard_reading
- apache_scboard_sending
- apache_scboard_keepalive
- apache_scboard_dnslookup
- apache_scboard_closing
- apache_scboard_logging
- apache_scboard_finishing
- apache_scboard_idle_cleanup
- apache_scboard_open
The following fields are collected from the `Scoreboard`, and represent the number of requests in the given state:
- apache
  - scboard_closing (float)
  - scboard_dnslookup (float)
  - scboard_finishing (float)
  - scboard_idle_cleanup (float)
  - scboard_keepalive (float)
  - scboard_logging (float)
  - scboard_open (float)
  - scboard_reading (float)
  - scboard_sending (float)
  - scboard_starting (float)
  - scboard_waiting (float)
### Tags:
- All measurements have the following tags:
  - port
  - server
### Example Output:
```
apache,port=80,server=debian-stretch-apache BusyWorkers=1,BytesPerReq=0,BytesPerSec=0,CPUChildrenSystem=0,CPUChildrenUser=0,CPULoad=0.00995025,CPUSystem=0.01,CPUUser=0.01,ConnsAsyncClosing=0,ConnsAsyncKeepAlive=0,ConnsAsyncWriting=0,ConnsTotal=0,IdleWorkers=49,Load1=0.01,Load15=0,Load5=0,ParentServerConfigGeneration=3,ParentServerMPMGeneration=2,ReqPerSec=0.00497512,ServerUptimeSeconds=201,TotalAccesses=1,TotalkBytes=0,Uptime=201,scboard_closing=0,scboard_dnslookup=0,scboard_finishing=0,scboard_idle_cleanup=0,scboard_keepalive=0,scboard_logging=0,scboard_open=100,scboard_reading=0,scboard_sending=1,scboard_starting=0,scboard_waiting=49 1502489900000000000
```

View File

@@ -8,6 +8,7 @@ import (
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
@@ -28,18 +29,22 @@ type Apache struct {
SSLKey string `toml:"ssl_key"`
// Use SSL but skip chain & host verification
InsecureSkipVerify bool
client *http.Client
}
var sampleConfig = `
## An array of Apache status URI to gather stats.
## An array of URLs to gather from, must be directed at the machine
## readable version of the mod_status page including the auto query string.
## Default is "http://localhost/server-status?auto".
urls = ["http://localhost/server-status?auto"]
## user credentials for basic HTTP authentication
username = "myuser"
password = "mypassword"
## Timeout for the complete connection and response, in seconds
response_timeout = "25s" ## defaults to 5 seconds
## Credentials for basic HTTP authentication.
# username = "myuser"
# password = "mypassword"
## Maximum time to receive response.
# response_timeout = "5s"
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
@@ -65,55 +70,51 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error {
n.ResponseTimeout.Duration = time.Second * 5
}
var outerr error
var errch = make(chan error)
for _, u := range n.Urls {
addr, err := url.Parse(u)
if err != nil {
return fmt.Errorf("Unable to parse address '%s': %s", u, err)
}
go func(addr *url.URL) {
errch <- n.gatherUrl(addr, acc)
}(addr)
}
// Drain channel, waiting for all requests to finish and save last error.
for range n.Urls {
if err := <-errch; err != nil {
outerr = err
}
}
return outerr
}
func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
var tr *http.Transport
if addr.Scheme == "https" {
tlsCfg, err := internal.GetTLSConfig(
n.SSLCert, n.SSLKey, n.SSLCA, n.InsecureSkipVerify)
if n.client == nil {
client, err := n.createHttpClient()
if err != nil {
return err
}
tr = &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
TLSClientConfig: tlsCfg,
}
} else {
tr = &http.Transport{
ResponseHeaderTimeout: time.Duration(3 * time.Second),
n.client = client
}
var wg sync.WaitGroup
wg.Add(len(n.Urls))
for _, u := range n.Urls {
addr, err := url.Parse(u)
if err != nil {
acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err))
continue
}
go func(addr *url.URL) {
defer wg.Done()
acc.AddError(n.gatherUrl(addr, acc))
}(addr)
}
wg.Wait()
return nil
}
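The rewritten Gather no longer aborts on the first bad URL: it fans out one goroutine per URL, reports each failure through the accumulator, and waits for all of them. A minimal sketch of the same fan-out pattern (fetch is a hypothetical stand-in for gatherUrl):
```go
package main

import (
	"fmt"
	"sync"
)

// fetch is a hypothetical stand-in for the plugin's gatherUrl.
func fetch(url string) error {
	if url == "" {
		return fmt.Errorf("empty url")
	}
	return nil
}

func main() {
	urls := []string{"http://a/server-status?auto", ""}
	errs := make(chan error, len(urls)) // plays the role of acc.AddError

	var wg sync.WaitGroup
	wg.Add(len(urls))
	for _, u := range urls {
		go func(u string) {
			defer wg.Done()
			if err := fetch(u); err != nil {
				errs <- err
			}
		}(u)
	}
	wg.Wait() // every URL was attempted; failures were reported individually
	close(errs)
	for err := range errs {
		fmt.Println(err)
	}
}
```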
func (n *Apache) createHttpClient() (*http.Client, error) {
tlsCfg, err := internal.GetTLSConfig(
n.SSLCert, n.SSLKey, n.SSLCA, n.InsecureSkipVerify)
if err != nil {
return nil, err
}
client := &http.Client{
Transport: tr,
Timeout: n.ResponseTimeout.Duration,
Transport: &http.Transport{
TLSClientConfig: tlsCfg,
},
Timeout: n.ResponseTimeout.Duration,
}
return client, nil
}
func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
req, err := http.NewRequest("GET", addr.String(), nil)
if err != nil {
return fmt.Errorf("error on new request to %s : %s\n", addr.String(), err)
@@ -123,7 +124,7 @@ func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
req.SetBasicAuth(n.Username, n.Password)
}
resp, err := client.Do(req)
resp, err := n.client.Do(req)
if err != nil {
return fmt.Errorf("error on request to %s : %s\n", addr.String(), err)
}
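The companion change above caches a single http.Client on the plugin struct, so the TLS transport is built once and connections can be reused between gather intervals. A minimal sketch of the lazy-initialization pattern (names are illustrative):
```go
package main

import (
	"net/http"
	"time"
)

type plugin struct {
	client *http.Client // nil until the first Gather
}

func (p *plugin) ensureClient() *http.Client {
	if p.client == nil {
		// Built once; later gathers reuse the same transport and
		// therefore its keep-alive connections.
		p.client = &http.Client{Timeout: 5 * time.Second}
	}
	return p.client
}

func main() {
	p := &plugin{}
	_ = p.ensureClient()
}
```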

View File

@@ -41,7 +41,7 @@ func TestHTTPApache(t *testing.T) {
}
var acc testutil.Accumulator
err := a.Gather(&acc)
err := acc.GatherError(a.Gather)
require.NoError(t, err)
fields := map[string]interface{}{

View File

@@ -70,7 +70,7 @@ Using this configuration:
When run with:
```
./telegraf -config telegraf.conf -input-filter bcache -test
./telegraf --config telegraf.conf --input-filter bcache --test
```
It produces:

View File

@@ -0,0 +1,85 @@
# Bond Input Plugin
The Bond input plugin collects network bond interface status for both the
network bond interface as well as slave interfaces.
The plugin collects these metrics from `/proc/net/bonding/*` files.
### Configuration:
```toml
[[inputs.bond]]
## Sets 'proc' directory path
## If not specified, then default is /proc
# host_proc = "/proc"
## By default, telegraf gathers stats for all bond interfaces
## Setting interfaces will restrict the stats to the specified
## bond interfaces.
# bond_interfaces = ["bond0"]
```
### Measurements & Fields:
- bond
- active_slave (for active-backup mode)
- status
- bond_slave
- failures
- status
### Description:
```
active_slave
Currently active slave interface for active-backup mode.
status
Status of bond interface or bond's slave interface (down = 0, up = 1).
failures
Number of failures for bond's slave interface.
```
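For reference, an abbreviated active-backup bonding file looks like the following (taken from the test fixture later in this diff):
```
Ethernet Channel Bonding Driver: v3.6.0 (September 26, 2009)
Bonding Mode: fault-tolerance (active-backup)
Currently Active Slave: eth2
MII Status: up

Slave Interface: eth2
MII Status: up
Link Failure Count: 0
```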
### Tags:
- bond
  - bond
- bond_slave
  - bond
  - interface
### Example output:
Configuration:
```
[[inputs.bond]]
## Sets 'proc' directory path
## If not specified, then default is /proc
host_proc = "/proc"
## By default, telegraf gathers stats for all bond interfaces
## Setting interfaces will restrict the stats to the specified
## bond interfaces.
bond_interfaces = ["bond0", "bond1"]
```
Run:
```
telegraf --config telegraf.conf --input-filter bond --test
```
Output:
```
* Plugin: inputs.bond, Collection 1
> bond,bond=bond1,host=local active_slave="eth0",status=1i 1509704525000000000
> bond_slave,bond=bond1,interface=eth0,host=local status=1i,failures=0i 1509704525000000000
> bond_slave,host=local,bond=bond1,interface=eth1 status=1i,failures=0i 1509704525000000000
> bond,bond=bond0,host=isvetlov-mac.local status=1i 1509704525000000000
> bond_slave,bond=bond0,interface=eth1,host=local status=1i,failures=0i 1509704525000000000
> bond_slave,bond=bond0,interface=eth2,host=local status=1i,failures=0i 1509704525000000000
```

plugins/inputs/bond/bond.go
View File

@@ -0,0 +1,204 @@
package bond
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
// default host proc path
const defaultHostProc = "/proc"
// env host proc variable name
const envProc = "HOST_PROC"
type Bond struct {
HostProc string `toml:"host_proc"`
BondInterfaces []string `toml:"bond_interfaces"`
}
var sampleConfig = `
## Sets 'proc' directory path
## If not specified, then default is /proc
# host_proc = "/proc"
## By default, telegraf gathers stats for all bond interfaces
## Setting interfaces will restrict the stats to the specified
## bond interfaces.
# bond_interfaces = ["bond0"]
`
func (bond *Bond) Description() string {
return "Collect bond interface status, slaves statuses and failures count"
}
func (bond *Bond) SampleConfig() string {
return sampleConfig
}
func (bond *Bond) Gather(acc telegraf.Accumulator) error {
// load proc path, get default value if config value and env variable are empty
bond.loadPath()
// use the configured bond interfaces, or list them all from the bonding directory
bondNames, err := bond.listInterfaces()
if err != nil {
return err
}
for _, bondName := range bondNames {
bondAbsPath := bond.HostProc + "/net/bonding/" + bondName
file, err := ioutil.ReadFile(bondAbsPath)
if err != nil {
acc.AddError(fmt.Errorf("error inspecting '%s' interface: %v", bondAbsPath, err))
continue
}
rawFile := strings.TrimSpace(string(file))
err = bond.gatherBondInterface(bondName, rawFile, acc)
if err != nil {
acc.AddError(fmt.Errorf("error inspecting '%s' interface: %v", bondName, err))
}
}
return nil
}
func (bond *Bond) gatherBondInterface(bondName string, rawFile string, acc telegraf.Accumulator) error {
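// Everything before the first "Slave Interface:" line describes the bond itself;
// the remainder describes its slaves.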
splitIndex := strings.Index(rawFile, "Slave Interface:")
if splitIndex == -1 {
splitIndex = len(rawFile)
}
bondPart := rawFile[:splitIndex]
slavePart := rawFile[splitIndex:]
err := bond.gatherBondPart(bondName, bondPart, acc)
if err != nil {
return err
}
err = bond.gatherSlavePart(bondName, slavePart, acc)
if err != nil {
return err
}
return nil
}
func (bond *Bond) gatherBondPart(bondName string, rawFile string, acc telegraf.Accumulator) error {
fields := make(map[string]interface{})
tags := map[string]string{
"bond": bondName,
}
scanner := bufio.NewScanner(strings.NewReader(rawFile))
for scanner.Scan() {
line := scanner.Text()
stats := strings.Split(line, ":")
if len(stats) < 2 {
continue
}
name := strings.TrimSpace(stats[0])
value := strings.TrimSpace(stats[1])
if strings.Contains(name, "Currently Active Slave") {
fields["active_slave"] = value
}
if strings.Contains(name, "MII Status") {
fields["status"] = 0
if value == "up" {
fields["status"] = 1
}
acc.AddFields("bond", fields, tags)
return nil
}
}
if err := scanner.Err(); err != nil {
return err
}
return fmt.Errorf("Couldn't find status info for '%s' ", bondName)
}
func (bond *Bond) gatherSlavePart(bondName string, rawFile string, acc telegraf.Accumulator) error {
var slave string
var status int
scanner := bufio.NewScanner(strings.NewReader(rawFile))
for scanner.Scan() {
line := scanner.Text()
stats := strings.Split(line, ":")
if len(stats) < 2 {
continue
}
name := strings.TrimSpace(stats[0])
value := strings.TrimSpace(stats[1])
if strings.Contains(name, "Slave Interface") {
slave = value
}
if strings.Contains(name, "MII Status") {
status = 0
if value == "up" {
status = 1
}
}
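// "Link Failure Count" is the last per-slave line we need, so emit this slave's point once it is seen.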
if strings.Contains(name, "Link Failure Count") {
count, err := strconv.Atoi(value)
if err != nil {
return err
}
fields := map[string]interface{}{
"status": status,
"failures": count,
}
tags := map[string]string{
"bond": bondName,
"interface": slave,
}
acc.AddFields("bond_slave", fields, tags)
}
}
if err := scanner.Err(); err != nil {
return err
}
return nil
}
// loadPath reads the proc path from the config first;
// if that is empty, it falls back to the env variable
func (bond *Bond) loadPath() {
if bond.HostProc == "" {
bond.HostProc = proc(envProc, defaultHostProc)
}
}
// proc can be used to read file paths from env
func proc(env, path string) string {
// try to read full file path
if p := os.Getenv(env); p != "" {
return p
}
// return default path
return path
}
func (bond *Bond) listInterfaces() ([]string, error) {
var interfaces []string
if len(bond.BondInterfaces) > 0 {
interfaces = bond.BondInterfaces
} else {
paths, err := filepath.Glob(bond.HostProc + "/net/bonding/*")
if err != nil {
return nil, err
}
for _, p := range paths {
interfaces = append(interfaces, filepath.Base(p))
}
}
return interfaces, nil
}
func init() {
inputs.Add("bond", func() telegraf.Input {
return &Bond{}
})
}

View File

@@ -0,0 +1,77 @@
package bond
import (
"testing"
"github.com/influxdata/telegraf/testutil"
)
var sampleTest802 = `
Ethernet Channel Bonding Driver: v3.5.0 (November 4, 2008)
Bonding Mode: IEEE 802.3ad Dynamic link aggregation
Transmit Hash Policy: layer2 (0)
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0
802.3ad info
LACP rate: fast
Aggregator selection policy (ad_select): stable
bond bond0 has no active aggregator
Slave Interface: eth1
MII Status: up
Link Failure Count: 0
Permanent HW addr: 00:0c:29:f5:b7:11
Aggregator ID: N/A
Slave Interface: eth2
MII Status: up
Link Failure Count: 3
Permanent HW addr: 00:0c:29:f5:b7:1b
Aggregator ID: N/A
`
var sampleTestAB = `
Ethernet Channel Bonding Driver: v3.6.0 (September 26, 2009)
Bonding Mode: fault-tolerance (active-backup)
Primary Slave: eth2 (primary_reselect always)
Currently Active Slave: eth2
MII Status: up
MII Polling Interval (ms): 100
Up Delay (ms): 0
Down Delay (ms): 0
Slave Interface: eth3
MII Status: down
Speed: 1000 Mbps
Duplex: full
Link Failure Count: 2
Permanent HW addr:
Slave queue ID: 0
Slave Interface: eth2
MII Status: up
Speed: 100 Mbps
Duplex: full
Link Failure Count: 0
Permanent HW addr:
`
func TestGatherBondInterface(t *testing.T) {
var acc testutil.Accumulator
bond := &Bond{}
bond.gatherBondInterface("bond802", sampleTest802, &acc)
acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"status": 1}, map[string]string{"bond": "bond802"})
acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bond802", "interface": "eth1"})
acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 3, "status": 1}, map[string]string{"bond": "bond802", "interface": "eth2"})
bond.gatherBondInterface("bondAB", sampleTestAB, &acc)
acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"active_slave": "eth2", "status": 1}, map[string]string{"bond": "bondAB"})
acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 2, "status": 0}, map[string]string{"bond": "bondAB", "interface": "eth3"})
acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bondAB", "interface": "eth2"})
}

View File

@@ -1,5 +1,8 @@
# Telegraf plugin: Cassandra
### **Deprecated in version 1.7**: Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin with the [cassandra.conf](/plugins/inputs/jolokia2/examples/cassandra.conf) example configuration.
#### Plugin arguments:
- **context** string: Context root used for the Jolokia URL
- **servers** []string: List of servers with the format "<user:passwd@><host>:port"

View File

@@ -4,13 +4,14 @@ import (
"encoding/json"
"errors"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
type JolokiaClient interface {
@@ -61,7 +62,8 @@ func newCassandraMetric(host string, metric string,
func addValuesAsFields(values map[string]interface{}, fields map[string]interface{},
mname string) {
for k, v := range values {
if v != nil {
switch v.(type) {
case int64, float64, string, bool:
fields[mname+"_"+k] = v
}
}
@@ -118,13 +120,13 @@ func (j javaMetric) addTagsFields(out map[string]interface{}) {
switch t := values.(type) {
case map[string]interface{}:
addValuesAsFields(values.(map[string]interface{}), fields, attribute)
case interface{}:
case int64, float64, string, bool:
fields[attribute] = t
}
j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags)
} else {
fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
j.metric, out)
j.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
j.metric, out))
}
}
@@ -155,8 +157,8 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
addCassandraMetric(k, c, v.(map[string]interface{}))
}
} else {
fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
c.metric, out)
c.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
c.metric, out))
return
}
} else {
@@ -164,8 +166,8 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
addCassandraMetric(r.(map[string]interface{})["mbean"].(string),
c, values.(map[string]interface{}))
} else {
fmt.Printf("Missing key 'value' in '%s' output response\n%v\n",
c.metric, out)
c.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n",
c.metric, out))
return
}
}
@@ -173,7 +175,11 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) {
func (j *Cassandra) SampleConfig() string {
return `
# This is the context root used to compose the jolokia url
## DEPRECATED: The cassandra plugin has been deprecated. Please use the
## jolokia2 plugin instead.
##
## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
context = "/jolokia/read"
## List of cassandra servers exposing jolokia read service
servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
@@ -257,6 +263,16 @@ func parseServerTokens(server string) map[string]string {
return serverTokens
}
func (c *Cassandra) Start(acc telegraf.Accumulator) error {
log.Println("W! DEPRECATED: The cassandra plugin has been deprecated. " +
"Please use the jolokia2 plugin instead. " +
"https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2")
return nil
}
func (c *Cassandra) Stop() {
}
func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
context := c.Context
servers := c.Servers
@@ -274,8 +290,8 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
m = newCassandraMetric(serverTokens["host"], metric, acc)
} else {
// unsupported metric type
log.Printf("I! Unsupported Cassandra metric [%s], skipping",
metric)
acc.AddError(fmt.Errorf("E! Unsupported Cassandra metric [%s], skipping",
metric))
continue
}
@@ -283,7 +299,8 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
requestUrl, err := url.Parse("http://" + serverTokens["host"] + ":" +
serverTokens["port"] + context + metric)
if err != nil {
return err
acc.AddError(err)
continue
}
if serverTokens["user"] != "" && serverTokens["passwd"] != "" {
requestUrl.User = url.UserPassword(serverTokens["user"],
@@ -291,8 +308,12 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error {
}
out, err := c.getAttr(requestUrl)
if err != nil {
acc.AddError(err)
continue
}
if out["status"] != 200.0 {
fmt.Printf("URL returned with status %v\n", out["status"])
acc.AddError(fmt.Errorf("URL returned with status %v - %s\n", out["status"], requestUrl))
continue
}
m.addTagsFields(out)

View File

@@ -151,7 +151,7 @@ func TestHttpJsonJavaMultiValue(t *testing.T) {
var acc testutil.Accumulator
acc.SetDebug(true)
err := cassandra.Gather(&acc)
err := acc.GatherError(cassandra.Gather)
assert.Nil(t, err)
assert.Equal(t, 2, len(acc.Metrics))
@@ -180,7 +180,7 @@ func TestHttpJsonJavaMultiType(t *testing.T) {
var acc testutil.Accumulator
acc.SetDebug(true)
err := cassandra.Gather(&acc)
err := acc.GatherError(cassandra.Gather)
assert.Nil(t, err)
assert.Equal(t, 2, len(acc.Metrics))
@@ -197,16 +197,17 @@ func TestHttpJsonJavaMultiType(t *testing.T) {
}
// Test that the proper values are ignored or collected
func TestHttpJsonOn404(t *testing.T) {
func TestHttp404(t *testing.T) {
jolokia := genJolokiaClientStub(validJavaMultiValueJSON, 404, Servers,
jolokia := genJolokiaClientStub(invalidJSON, 404, Servers,
[]string{HeapMetric})
var acc testutil.Accumulator
err := jolokia.Gather(&acc)
err := acc.GatherError(jolokia.Gather)
assert.Nil(t, err)
assert.Error(t, err)
assert.Equal(t, 0, len(acc.Metrics))
assert.Contains(t, err.Error(), "has status code 404")
}
// Test that the proper values are ignored or collected for class=Cassandra
@@ -214,7 +215,7 @@ func TestHttpJsonCassandraMultiValue(t *testing.T) {
cassandra := genJolokiaClientStub(validCassandraMultiValueJSON, 200, Servers, []string{ReadLatencyMetric})
var acc testutil.Accumulator
err := cassandra.Gather(&acc)
err := acc.GatherError(cassandra.Gather)
assert.Nil(t, err)
assert.Equal(t, 1, len(acc.Metrics))
@@ -246,7 +247,7 @@ func TestHttpJsonCassandraNestedMultiValue(t *testing.T) {
var acc testutil.Accumulator
acc.SetDebug(true)
err := cassandra.Gather(&acc)
err := acc.GatherError(cassandra.Gather)
assert.Nil(t, err)
assert.Equal(t, 2, len(acc.Metrics))

View File

@@ -117,7 +117,7 @@ All fields are collected under the **ceph** measurement and stored as float64s.
* recovering\_objects\_per\_sec (float)
* ceph\_pgmap\_state
  * state name e.g. active+clean (float)
  * count (float)
* ceph\_usage
  * bytes\_used (float)
@@ -186,7 +186,7 @@ All measurements will have the following tags:
*Cluster Stats*
* ceph\_pg\_state has the following tags:
* ceph\_pgmap\_state has the following tags:
  * state (state for which the value applies e.g. active+clean, active+remapped+backfill)
* ceph\_pool\_usage has the following tags:
  * id
@@ -200,7 +200,7 @@ All measurements will have the following tags:
*Admin Socket Stats*
<pre>
telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d -input-filter ceph
telegraf --config /etc/telegraf/telegraf.conf --config-directory /etc/telegraf/telegraf.d --input-filter ceph --test
* Plugin: ceph, Collection 1
> ceph,collection=paxos, id=node-2,role=openstack,type=mon accept_timeout=0,begin=14931264,begin_bytes.avgcount=14931264,begin_bytes.sum=180309683362,begin_keys.avgcount=0,begin_keys.sum=0,begin_latency.avgcount=14931264,begin_latency.sum=9293.29589,collect=1,collect_bytes.avgcount=1,collect_bytes.sum=24,collect_keys.avgcount=1,collect_keys.sum=1,collect_latency.avgcount=1,collect_latency.sum=0.00028,collect_timeout=0,collect_uncommitted=0,commit=14931264,commit_bytes.avgcount=0,commit_bytes.sum=0,commit_keys.avgcount=0,commit_keys.sum=0,commit_latency.avgcount=0,commit_latency.sum=0,lease_ack_timeout=0,lease_timeout=0,new_pn=0,new_pn_latency.avgcount=0,new_pn_latency.sum=0,refresh=14931264,refresh_latency.avgcount=14931264,refresh_latency.sum=8706.98498,restart=4,share_state=0,share_state_bytes.avgcount=0,share_state_bytes.sum=0,share_state_keys.avgcount=0,share_state_keys.sum=0,start_leader=0,start_peon=1,store_state=14931264,store_state_bytes.avgcount=14931264,store_state_bytes.sum=353119959211,store_state_keys.avgcount=14931264,store_state_keys.sum=289807523,store_state_latency.avgcount=14931264,store_state_latency.sum=10952.835724 1462821234814535148
> ceph,collection=throttle-mon_client_bytes,id=node-2,type=mon get=1413017,get_or_fail_fail=0,get_or_fail_success=0,get_sum=71211705,max=104857600,put=1413013,put_sum=71211459,take=0,take_sum=0,val=246,wait.avgcount=0,wait.sum=0 1462821234814737219
@@ -213,7 +213,8 @@ telegraf -test -config /etc/telegraf/telegraf.conf -config-directory /etc/telegr
<pre>
> ceph_osdmap,host=ceph-mon-0 epoch=170772,full=false,nearfull=false,num_in_osds=340,num_osds=340,num_remapped_pgs=0,num_up_osds=340 1468841037000000000
> ceph_pgmap,host=ceph-mon-0 bytes_avail=634895531270144,bytes_total=812117151809536,bytes_used=177221620539392,data_bytes=56979991615058,num_pgs=22952,op_per_sec=15869,read_bytes_sec=43956026,version=39387592,write_bytes_sec=165344818 1468841037000000000
> ceph_pgmap_state,host=ceph-mon-0 active+clean=22952 1468928660000000000
> ceph_pgmap_state,host=ceph-mon-0,state=active+clean count=22952 1468928660000000000
> ceph_pgmap_state,host=ceph-mon-0,state=active+degraded count=16 1468928660000000000
> ceph_usage,host=ceph-mon-0 total_avail_bytes=634895514791936,total_bytes=812117151809536,total_used_bytes=177221637017600 1468841037000000000
> ceph_pool_usage,host=ceph-mon-0,id=150,name=cinder.volumes bytes_used=12648553794802,kb_used=12352103316,max_avail=154342562489244,objects=3026295 1468841037000000000
> ceph_pool_usage,host=ceph-mon-0,id=182,name=cinder.volumes.flash bytes_used=8541308223964,kb_used=8341121313,max_avail=39388593563936,objects=2075066 1468841037000000000

View File

@@ -4,13 +4,14 @@ import (
"bytes"
"encoding/json"
"fmt"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
"io/ioutil"
"log"
"os/exec"
"path/filepath"
"strings"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
const (
@@ -100,15 +101,15 @@ func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error {
for _, s := range sockets {
dump, err := perfDump(c.CephBinary, s)
if err != nil {
log.Printf("E! error reading from socket '%s': %v", s.socket, err)
acc.AddError(fmt.Errorf("E! error reading from socket '%s': %v", s.socket, err))
continue
}
data, err := parseDump(dump)
if err != nil {
log.Printf("E! error parsing dump from socket '%s': %v", s.socket, err)
acc.AddError(fmt.Errorf("E! error parsing dump from socket '%s': %v", s.socket, err))
continue
}
for tag, metrics := range *data {
for tag, metrics := range data {
acc.AddFields(measurement,
map[string]interface{}(metrics),
map[string]string{"type": s.sockType, "id": s.sockId, "collection": tag})
@@ -244,25 +245,19 @@ type taggedMetricMap map[string]metricMap
// Parses a raw JSON string into a taggedMetricMap
// Delegates the actual parsing to newTaggedMetricMap(..)
func parseDump(dump string) (*taggedMetricMap, error) {
func parseDump(dump string) (taggedMetricMap, error) {
data := make(map[string]interface{})
err := json.Unmarshal([]byte(dump), &data)
if err != nil {
return nil, fmt.Errorf("failed to parse json: '%s': %v", dump, err)
}
tmm := newTaggedMetricMap(data)
if err != nil {
return nil, fmt.Errorf("failed to tag dataset: '%v': %v", tmm, err)
}
return tmm, nil
return newTaggedMetricMap(data), nil
}
// Builds a TaggedMetricMap out of a generic string map.
// The top-level key is used as a tag and all sub-keys are flattened into metrics
func newTaggedMetricMap(data map[string]interface{}) *taggedMetricMap {
func newTaggedMetricMap(data map[string]interface{}) taggedMetricMap {
tmm := make(taggedMetricMap)
for tag, datapoints := range data {
mm := make(metricMap)
@@ -271,7 +266,7 @@ func newTaggedMetricMap(data map[string]interface{}) *taggedMetricMap {
}
tmm[tag] = mm
}
return &tmm
return tmm
}
// Recursively flattens any k-v hierarchy present in data.
@@ -376,36 +371,53 @@ func decodeStatusPgmap(acc telegraf.Accumulator, data map[string]interface{}) er
return nil
}
func decodeStatusPgmapState(acc telegraf.Accumulator, data map[string]interface{}) error {
func extractPgmapStates(data map[string]interface{}) ([]interface{}, error) {
const key = "pgs_by_state"
pgmap, ok := data["pgmap"].(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
return nil, fmt.Errorf("WARNING %s - unable to decode pgmap", measurement)
}
fields := make(map[string]interface{})
for key, value := range pgmap {
switch value.(type) {
case []interface{}:
if key != "pgs_by_state" {
continue
}
for _, state := range value.([]interface{}) {
state_map, ok := state.(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pg state", measurement)
}
state_name, ok := state_map["state_name"].(string)
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pg state name", measurement)
}
state_count, ok := state_map["count"].(float64)
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pg state count", measurement)
}
fields[state_name] = state_count
}
s, ok := pgmap[key]
if !ok {
return nil, fmt.Errorf("WARNING %s - pgmap is missing the %s field", measurement, key)
}
states, ok := s.([]interface{})
if !ok {
return nil, fmt.Errorf("WARNING %s - pgmap[%s] is not a list", measurement, key)
}
return states, nil
}
func decodeStatusPgmapState(acc telegraf.Accumulator, data map[string]interface{}) error {
states, err := extractPgmapStates(data)
if err != nil {
return err
}
for _, state := range states {
stateMap, ok := state.(map[string]interface{})
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pg state", measurement)
}
stateName, ok := stateMap["state_name"].(string)
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pg state name", measurement)
}
stateCount, ok := stateMap["count"].(float64)
if !ok {
return fmt.Errorf("WARNING %s - unable to decode pg state count", measurement)
}
tags := map[string]string{
"state": stateName,
}
fields := map[string]interface{}{
"count": stateCount,
}
acc.AddFields("ceph_pgmap_state", fields, tags)
}
acc.AddFields("ceph_pgmap_state", fields, map[string]string{})
return nil
}
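With the clusterStatusDump fixture from the tests below, the refactored decoder now emits one point per state, tagged rather than field-named, equivalent to:
```
> ceph_pgmap_state,state=active+clean count=2560
> ceph_pgmap_state,state=active+scrubbing count=10
> ceph_pgmap_state,state=active+backfilling count=5
```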

View File

@@ -1,15 +1,17 @@
package ceph
import (
"encoding/json"
"fmt"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
"testing"
"github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert"
)
const (
@@ -24,15 +26,38 @@ func TestParseSockId(t *testing.T) {
func TestParseMonDump(t *testing.T) {
dump, err := parseDump(monPerfDump)
assert.NoError(t, err)
assert.InEpsilon(t, 5678670180, (*dump)["cluster"]["osd_kb_used"], epsilon)
assert.InEpsilon(t, 6866.540527000, (*dump)["paxos"]["store_state_latency.sum"], epsilon)
assert.InEpsilon(t, int64(5678670180), dump["cluster"]["osd_kb_used"], epsilon)
assert.InEpsilon(t, 6866.540527000, dump["paxos"]["store_state_latency.sum"], epsilon)
}
func TestParseOsdDump(t *testing.T) {
dump, err := parseDump(osdPerfDump)
assert.NoError(t, err)
assert.InEpsilon(t, 552132.109360000, (*dump)["filestore"]["commitcycle_interval.sum"], epsilon)
assert.Equal(t, float64(0), (*dump)["mutex-FileJournal::finisher_lock"]["wait.avgcount"])
assert.InEpsilon(t, 552132.109360000, dump["filestore"]["commitcycle_interval.sum"], epsilon)
assert.Equal(t, float64(0), dump["mutex-FileJournal::finisher_lock"]["wait.avgcount"])
}
func TestDecodeStatusPgmapState(t *testing.T) {
data := make(map[string]interface{})
err := json.Unmarshal([]byte(clusterStatusDump), &data)
assert.NoError(t, err)
acc := &testutil.Accumulator{}
err = decodeStatusPgmapState(acc, data)
assert.NoError(t, err)
var results = []struct {
fields map[string]interface{}
tags map[string]string
}{
{map[string]interface{}{"count": float64(2560)}, map[string]string{"state": "active+clean"}},
{map[string]interface{}{"count": float64(10)}, map[string]string{"state": "active+scrubbing"}},
{map[string]interface{}{"count": float64(5)}, map[string]string{"state": "active+backfilling"}},
}
for _, r := range results {
acc.AssertContainsTaggedFields(t, "ceph_pgmap_state", r.fields, r.tags)
}
}
func TestGather(t *testing.T) {
@@ -685,3 +710,127 @@ var osdPerfDump = `
"wait": { "avgcount": 0,
"sum": 0.000000000}}}
`
var clusterStatusDump = `
{
"health": {
"health": {
"health_services": [
{
"mons": [
{
"name": "a",
"kb_total": 114289256,
"kb_used": 26995516,
"kb_avail": 81465132,
"avail_percent": 71,
"last_updated": "2017-01-03 17:20:57.595004",
"store_stats": {
"bytes_total": 942117141,
"bytes_sst": 0,
"bytes_log": 4345406,
"bytes_misc": 937771735,
"last_updated": "0.000000"
},
"health": "HEALTH_OK"
},
{
"name": "b",
"kb_total": 114289256,
"kb_used": 27871624,
"kb_avail": 80589024,
"avail_percent": 70,
"last_updated": "2017-01-03 17:20:47.784331",
"store_stats": {
"bytes_total": 454853104,
"bytes_sst": 0,
"bytes_log": 5788320,
"bytes_misc": 449064784,
"last_updated": "0.000000"
},
"health": "HEALTH_OK"
},
{
"name": "c",
"kb_total": 130258508,
"kb_used": 38076996,
"kb_avail": 85541692,
"avail_percent": 65,
"last_updated": "2017-01-03 17:21:03.311123",
"store_stats": {
"bytes_total": 455555199,
"bytes_sst": 0,
"bytes_log": 6950876,
"bytes_misc": 448604323,
"last_updated": "0.000000"
},
"health": "HEALTH_OK"
}
]
}
]
},
"timechecks": {
"epoch": 504,
"round": 34642,
"round_status": "finished",
"mons": [
{ "name": "a", "skew": 0, "latency": 0, "health": "HEALTH_OK" },
{ "name": "b", "skew": -0, "latency": 0.000951, "health": "HEALTH_OK" },
{ "name": "c", "skew": -0, "latency": 0.000946, "health": "HEALTH_OK" }
]
},
"summary": [],
"overall_status": "HEALTH_OK",
"detail": []
},
"fsid": "01234567-abcd-9876-0123-ffeeddccbbaa",
"election_epoch": 504,
"quorum": [ 0, 1, 2 ],
"quorum_names": [ "a", "b", "c" ],
"monmap": {
"epoch": 17,
"fsid": "01234567-abcd-9876-0123-ffeeddccbbaa",
"modified": "2016-04-11 14:01:52.600198",
"created": "0.000000",
"mons": [
{ "rank": 0, "name": "a", "addr": "192.168.0.1:6789/0" },
{ "rank": 1, "name": "b", "addr": "192.168.0.2:6789/0" },
{ "rank": 2, "name": "c", "addr": "192.168.0.3:6789/0" }
]
},
"osdmap": {
"osdmap": {
"epoch": 21734,
"num_osds": 24,
"num_up_osds": 24,
"num_in_osds": 24,
"full": false,
"nearfull": false,
"num_remapped_pgs": 0
}
},
"pgmap": {
"pgs_by_state": [
{ "state_name": "active+clean", "count": 2560 },
{ "state_name": "active+scrubbing", "count": 10 },
{ "state_name": "active+backfilling", "count": 5 }
],
"version": 52314277,
"num_pgs": 2560,
"data_bytes": 2700031960713,
"bytes_used": 7478347665408,
"bytes_avail": 9857462382592,
"bytes_total": 17335810048000,
"read_bytes_sec": 0,
"write_bytes_sec": 367217,
"op_per_sec": 98
},
"mdsmap": {
"epoch": 1,
"up": 0,
"in": 0,
"max": 0,
"by_rank": []
}
}
`

View File

@@ -22,10 +22,11 @@ func (g *CGroup) Gather(acc telegraf.Accumulator) error {
for dir := range list {
if dir.err != nil {
return dir.err
acc.AddError(dir.err)
continue
}
if err := g.gatherDir(dir.path, acc); err != nil {
return err
acc.AddError(err)
}
}
@@ -224,7 +225,7 @@ var fileFormats = [...]fileFormat{
}
func numberOrString(s string) interface{} {
i, err := strconv.Atoi(s)
i, err := strconv.ParseInt(s, 10, 64)
if err == nil {
return i
}
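The switch from strconv.Atoi to strconv.ParseInt matters on 32-bit builds, where Go's int is 32 bits wide; a minimal sketch illustrating the difference, using a value borrowed from the tests below:
```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// A value from the cgroup tests below; it does not fit in a 32-bit int.
	s := "223372036854771712"

	// On 32-bit builds, where Go's int is 32 bits, Atoi returns a range error.
	if _, err := strconv.Atoi(s); err != nil {
		fmt.Println("Atoi:", err)
	}

	// ParseInt(s, 10, 64) yields an int64 on every architecture.
	v, err := strconv.ParseInt(s, 10, 64)
	fmt.Println(v, err) // 223372036854771712 <nil>
}
```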

View File

@@ -24,24 +24,24 @@ var cg1 = &CGroup{
func TestCgroupStatistics_1(t *testing.T) {
var acc testutil.Accumulator
err := cg1.Gather(&acc)
err := acc.GatherError(cg1.Gather)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/memory",
}
fields := map[string]interface{}{
"memory.stat.cache": 1739362304123123123,
"memory.stat.rss": 1775325184,
"memory.stat.rss_huge": 778043392,
"memory.stat.mapped_file": 421036032,
"memory.stat.dirty": -307200,
"memory.max_usage_in_bytes.0": 0,
"memory.max_usage_in_bytes.1": -1,
"memory.max_usage_in_bytes.2": 2,
"memory.limit_in_bytes": 223372036854771712,
"memory.stat.cache": int64(1739362304123123123),
"memory.stat.rss": int64(1775325184),
"memory.stat.rss_huge": int64(778043392),
"memory.stat.mapped_file": int64(421036032),
"memory.stat.dirty": int64(-307200),
"memory.max_usage_in_bytes.0": int64(0),
"memory.max_usage_in_bytes.1": int64(-1),
"memory.max_usage_in_bytes.2": int64(2),
"memory.limit_in_bytes": int64(223372036854771712),
"memory.use_hierarchy": "12-781",
"notify_on_release": 0,
"notify_on_release": int64(0),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
@@ -56,17 +56,17 @@ var cg2 = &CGroup{
func TestCgroupStatistics_2(t *testing.T) {
var acc testutil.Accumulator
err := cg2.Gather(&acc)
err := acc.GatherError(cg2.Gather)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/cpu",
}
fields := map[string]interface{}{
"cpuacct.usage_percpu.0": -1452543795404,
"cpuacct.usage_percpu.1": 1376681271659,
"cpuacct.usage_percpu.2": 1450950799997,
"cpuacct.usage_percpu.3": -1473113374257,
"cpuacct.usage_percpu.0": int64(-1452543795404),
"cpuacct.usage_percpu.1": int64(1376681271659),
"cpuacct.usage_percpu.2": int64(1450950799997),
"cpuacct.usage_percpu.3": int64(-1473113374257),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}
@@ -81,14 +81,14 @@ var cg3 = &CGroup{
func TestCgroupStatistics_3(t *testing.T) {
var acc testutil.Accumulator
err := cg3.Gather(&acc)
err := acc.GatherError(cg3.Gather)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/memory/group_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"memory.limit_in_bytes": int64(223372036854771712),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
@@ -108,14 +108,14 @@ var cg4 = &CGroup{
func TestCgroupStatistics_4(t *testing.T) {
var acc testutil.Accumulator
err := cg4.Gather(&acc)
err := acc.GatherError(cg4.Gather)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/memory/group_1/group_1_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"memory.limit_in_bytes": int64(223372036854771712),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
@@ -140,14 +140,14 @@ var cg5 = &CGroup{
func TestCgroupStatistics_5(t *testing.T) {
var acc testutil.Accumulator
err := cg5.Gather(&acc)
err := acc.GatherError(cg5.Gather)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/memory/group_1/group_1_1",
}
fields := map[string]interface{}{
"memory.limit_in_bytes": 223372036854771712,
"memory.limit_in_bytes": int64(223372036854771712),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
@@ -167,16 +167,16 @@ var cg6 = &CGroup{
func TestCgroupStatistics_6(t *testing.T) {
var acc testutil.Accumulator
err := cg6.Gather(&acc)
err := acc.GatherError(cg6.Gather)
require.NoError(t, err)
tags := map[string]string{
"path": "testdata/memory",
}
fields := map[string]interface{}{
"memory.usage_in_bytes": 3513667584,
"memory.usage_in_bytes": int64(3513667584),
"memory.use_hierarchy": "12-781",
"memory.kmem.limit_in_bytes": 9223372036854771712,
"memory.kmem.limit_in_bytes": int64(9223372036854771712),
}
acc.AssertContainsTaggedFields(t, "cgroup", fields, tags)
}

View File

@@ -63,6 +63,7 @@ Delete second or Not synchronised.
### Measurements & Fields:
- chrony
  - system_time (float, seconds)
  - last_offset (float, seconds)
  - rms_offset (float, seconds)
  - frequency (float, ppm)
@@ -82,9 +83,9 @@ Delete second or Not synchronised.
### Example Output:
```
$ telegraf -config telegraf.conf -input-filter chrony -test
$ telegraf --config telegraf.conf --input-filter chrony --test
* Plugin: chrony, Collection 1
> chrony,leap_status=normal,reference_id=192.168.1.1,stratum=3 frequency=-35.657,last_offset=-0.000013616,residual_freq=-0,rms_offset=0.000027073,root_delay=0.000644,root_dispersion=0.003444,skew=0.001,update_interval=1031.2 1463750789687639161
> chrony,leap_status=normal,reference_id=192.168.1.1,stratum=3 frequency=-35.657,system_time=0.000027073,last_offset=-0.000013616,residual_freq=-0,rms_offset=0.000027073,root_delay=0.000644,root_dispersion=0.003444,skew=0.001,update_interval=1031.2 1463750789687639161
```

View File

@@ -1,5 +1,3 @@
// +build linux
package chrony
import (
@@ -92,7 +90,7 @@ func processChronycOutput(out string) (map[string]interface{}, map[string]string
}
name := strings.ToLower(strings.Replace(strings.TrimSpace(stats[0]), " ", "_", -1))
// ignore only the reference time line; matching plain "time" also dropped system_time
if strings.Contains(name, "time") {
if strings.Contains(name, "ref_time") {
continue
}
valueFields := strings.Fields(stats[1])
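A minimal sketch of the field-name normalization this parser applies (the input line is illustrative, and SplitN is used here for brevity):
```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	line := "Last offset     : -0.000013616 seconds"
	parts := strings.SplitN(line, ":", 2)
	// "Last offset" -> "last_offset", matching the field names in the README
	name := strings.ToLower(strings.Replace(strings.TrimSpace(parts[0]), " ", "_", -1))
	value := strings.Fields(parts[1])[0]
	fmt.Println(name, value) // last_offset -0.000013616
}
```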
